Diffstat (limited to 'drivers')
-rw-r--r--  drivers/accessibility/speakup/main.c                          |   2
-rw-r--r--  drivers/android/binder.c                                      |   4
-rw-r--r--  drivers/bluetooth/btmtk.c                                     |   7
-rw-r--r--  drivers/bluetooth/btqca.c                                     |  38
-rw-r--r--  drivers/bluetooth/btusb.c                                     |  11
-rw-r--r--  drivers/bluetooth/hci_qca.c                                   |  29
-rw-r--r--  drivers/clk/clk.c                                             | 173
-rw-r--r--  drivers/clk/mediatek/clk-mt7988-infracfg.c                    |   2
-rw-r--r--  drivers/clk/mediatek/clk-mtk.c                                |  15
-rw-r--r--  drivers/comedi/drivers/vmk80xx.c                              |  35
-rw-r--r--  drivers/dpll/dpll_core.c                                      |  58
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c                        |   2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c                    |  22
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.h                    |  22
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c                       |  65
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h                       |   3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c                        |  72
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process.c                      |   4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c |   8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c |   4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.c                        |  13
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dp.c                          |  23
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c            |   7
-rw-r--r--  drivers/gpu/drm/panel/panel-novatek-nt36672e.c                |   2
-rw-r--r--  drivers/gpu/drm/panel/panel-visionox-rm69299.c                |   2
-rw-r--r--  drivers/gpu/drm/radeon/pptable.h                              |  10
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atombios.c                      |   8
-rw-r--r--  drivers/gpu/drm/ttm/ttm_pool.c                                |  38
-rw-r--r--  drivers/gpu/drm/v3d/v3d_irq.c                                 |   4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_blit.c                          |  35
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_bo.c                            |   7
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_bo.h                            |   2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c                           |   1
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h                           |   3
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_gem.c                           |  32
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c                           |  11
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.h                           |   4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_prime.c                         |  15
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c                    |  44
-rw-r--r--  drivers/gpu/drm/xe/display/intel_fb_bo.c                      |   8
-rw-r--r--  drivers/gpu/drm/xe/xe_vm.c                                    |  21
-rw-r--r--  drivers/hid/hid-logitech-dj.c                                 |   4
-rw-r--r--  drivers/hid/hid-mcp2221.c                                     |   2
-rw-r--r--  drivers/hid/hid-nintendo.c                                    |   8
-rw-r--r--  drivers/hid/i2c-hid/i2c-hid-core.c                            |  38
-rw-r--r--  drivers/hid/intel-ish-hid/ipc/ipc.c                           |   2
-rw-r--r--  drivers/infiniband/core/cm.c                                  |  11
-rw-r--r--  drivers/infiniband/hw/mlx5/mad.c                              |   3
-rw-r--r--  drivers/infiniband/sw/rxe/rxe.c                               |   2
-rw-r--r--  drivers/interconnect/core.c                                   |   8
-rw-r--r--  drivers/interconnect/qcom/x1e80100.c                          |  26
-rw-r--r--  drivers/iommu/iommufd/Kconfig                                 |   1
-rw-r--r--  drivers/misc/cardreader/rtsx_pcr.c                            |   2
-rw-r--r--  drivers/misc/mei/pci-me.c                                     |   2
-rw-r--r--  drivers/misc/mei/platform-vsc.c                               |  17
-rw-r--r--  drivers/misc/mei/vsc-tp.c                                     |  84
-rw-r--r--  drivers/misc/mei/vsc-tp.h                                     |   3
-rw-r--r--  drivers/net/dsa/mv88e6xxx/chip.c                              |  56
-rw-r--r--  drivers/net/dsa/mv88e6xxx/port.h                              |  23
-rw-r--r--  drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c              |  21
-rw-r--r--  drivers/net/ethernet/broadcom/b44.c                           |  14
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c                     |  82
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c                   |   6
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_main.c                   |  30
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_vf_lib.c                   |  16
-rw-r--r--  drivers/net/ethernet/intel/igc/igc.h                          |   2
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_leds.c                     |  38
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_main.c                     |   3
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c           |   1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c     |   1
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core.c                    |   2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_env.c                |  20
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/pci.c                     |  10
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c       | 115
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h       |   5
-rw-r--r--  drivers/net/ethernet/renesas/ravb_main.c                      |  11
-rw-r--r--  drivers/net/ethernet/ti/am65-cpts.c                           |   5
-rw-r--r--  drivers/net/ethernet/ti/icssg/icssg_common.c                  |   8
-rw-r--r--  drivers/net/ethernet/wangxun/libwx/wx_lib.c                   |   2
-rw-r--r--  drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c                |   8
-rw-r--r--  drivers/net/gtp.c                                             |   3
-rw-r--r--  drivers/net/macsec.c                                          |  46
-rw-r--r--  drivers/net/phy/dp83869.c                                     |   3
-rw-r--r--  drivers/net/phy/mediatek-ge-soc.c                             |  43
-rw-r--r--  drivers/net/usb/ax88179_178a.c                                |  11
-rw-r--r--  drivers/net/usb/qmi_wwan.c                                    |   3
-rw-r--r--  drivers/net/vxlan/vxlan_core.c                                |   4
-rw-r--r--  drivers/net/wireless/ath/ath11k/mac.c                         |   4
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/cfg/bz.c                   |   2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/cfg/sc.c                   |   2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c        |   2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/link.c                 |   2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/scan.c                 |   3
-rw-r--r--  drivers/net/wireless/virtual/mac80211_hwsim.c                 |   2
-rw-r--r--  drivers/nfc/trf7970a.c                                        |  42
-rw-r--r--  drivers/s390/cio/device.c                                     |  13
-rw-r--r--  drivers/s390/cio/device_fsm.c                                 |   5
-rw-r--r--  drivers/s390/cio/device_ops.c                                 |   8
-rw-r--r--  drivers/s390/cio/qdio_main.c                                  |  28
-rw-r--r--  drivers/scsi/scsi_lib.c                                       |   7
-rw-r--r--  drivers/thermal/thermal_debugfs.c                             |   1
-rw-r--r--  drivers/thunderbolt/switch.c                                  |  48
-rw-r--r--  drivers/thunderbolt/tb.c                                      |  10
-rw-r--r--  drivers/thunderbolt/tb.h                                      |   3
-rw-r--r--  drivers/thunderbolt/usb4.c                                    |  13
-rw-r--r--  drivers/tty/serial/8250/8250_dw.c                             |   6
-rw-r--r--  drivers/tty/serial/8250/8250_lpc18xx.c                        |   2
-rw-r--r--  drivers/tty/serial/8250/8250_pci.c                            |   6
-rw-r--r--  drivers/tty/serial/mxs-auart.c                                |   8
-rw-r--r--  drivers/tty/serial/pmac_zilog.c                               |  14
-rw-r--r--  drivers/tty/serial/serial_base.h                              |   4
-rw-r--r--  drivers/tty/serial/serial_core.c                              |  23
-rw-r--r--  drivers/tty/serial/serial_port.c                              |  34
-rw-r--r--  drivers/tty/serial/stm32-usart.c                              |  13
-rw-r--r--  drivers/ufs/host/ufs-qcom.c                                   |   8
-rw-r--r--  drivers/usb/class/cdc-wdm.c                                   |   6
-rw-r--r--  drivers/usb/core/port.c                                       |   4
-rw-r--r--  drivers/usb/dwc2/hcd_ddma.c                                   |   4
-rw-r--r--  drivers/usb/dwc3/ep0.c                                        |   3
-rw-r--r--  drivers/usb/gadget/function/f_fs.c                            |  29
-rw-r--r--  drivers/usb/gadget/function/f_ncm.c                           |   4
-rw-r--r--  drivers/usb/gadget/udc/fsl_udc_core.c                         |   5
-rw-r--r--  drivers/usb/host/xhci-ring.c                                  |   9
-rw-r--r--  drivers/usb/host/xhci-trace.h                                 |  12
-rw-r--r--  drivers/usb/misc/onboard_usb_hub.c                            |   6
-rw-r--r--  drivers/usb/serial/option.c                                   |  40
-rw-r--r--  drivers/usb/typec/mux/it5205.c                                |   2
-rw-r--r--  drivers/usb/typec/tcpm/tcpm.c                                 |   4
-rw-r--r--  drivers/usb/typec/ucsi/ucsi.c                                 |   6
129 files changed, 1412 insertions(+), 706 deletions(-)
diff --git a/drivers/accessibility/speakup/main.c b/drivers/accessibility/speakup/main.c
index 1fbc9b921c4f..736c2eb8c0f3 100644
--- a/drivers/accessibility/speakup/main.c
+++ b/drivers/accessibility/speakup/main.c
@@ -574,7 +574,7 @@ static u_long get_word(struct vc_data *vc)
}
attr_ch = get_char(vc, (u_short *)tmp_pos, &spk_attr);
buf[cnt++] = attr_ch;
- while (tmpx < vc->vc_cols - 1) {
+ while (tmpx < vc->vc_cols - 1 && cnt < sizeof(buf) - 1) {
tmp_pos += 2;
tmpx++;
ch = get_char(vc, (u_short *)tmp_pos, &temp);
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index bad28cf42010..dd6923d37931 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -1708,8 +1708,10 @@ static size_t binder_get_object(struct binder_proc *proc,
size_t object_size = 0;
read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
- if (offset > buffer->data_size || read_size < sizeof(*hdr))
+ if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
+ !IS_ALIGNED(offset, sizeof(u32)))
return 0;
+
if (u) {
if (copy_from_user(object, u + offset, read_size))
return 0;
diff --git a/drivers/bluetooth/btmtk.c b/drivers/bluetooth/btmtk.c
index ac8ebccd3507..812fd2a8f853 100644
--- a/drivers/bluetooth/btmtk.c
+++ b/drivers/bluetooth/btmtk.c
@@ -380,8 +380,10 @@ int btmtk_process_coredump(struct hci_dev *hdev, struct sk_buff *skb)
switch (data->cd_info.state) {
case HCI_DEVCOREDUMP_IDLE:
err = hci_devcd_init(hdev, MTK_COREDUMP_SIZE);
- if (err < 0)
+ if (err < 0) {
+ kfree_skb(skb);
break;
+ }
data->cd_info.cnt = 0;
/* It is supposed coredump can be done within 5 seconds */
@@ -407,9 +409,6 @@ int btmtk_process_coredump(struct hci_dev *hdev, struct sk_buff *skb)
break;
}
- if (err < 0)
- kfree_skb(skb);
-
return err;
}
EXPORT_SYMBOL_GPL(btmtk_process_coredump);
diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
index 19cfc342fc7b..216826c31ee3 100644
--- a/drivers/bluetooth/btqca.c
+++ b/drivers/bluetooth/btqca.c
@@ -15,6 +15,8 @@
#define VERSION "0.1"
+#define QCA_BDADDR_DEFAULT (&(bdaddr_t) {{ 0xad, 0x5a, 0x00, 0x00, 0x00, 0x00 }})
+
int qca_read_soc_version(struct hci_dev *hdev, struct qca_btsoc_version *ver,
enum qca_btsoc_type soc_type)
{
@@ -612,6 +614,38 @@ int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr)
}
EXPORT_SYMBOL_GPL(qca_set_bdaddr_rome);
+static int qca_check_bdaddr(struct hci_dev *hdev)
+{
+ struct hci_rp_read_bd_addr *bda;
+ struct sk_buff *skb;
+ int err;
+
+ if (bacmp(&hdev->public_addr, BDADDR_ANY))
+ return 0;
+
+ skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
+ HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ err = PTR_ERR(skb);
+ bt_dev_err(hdev, "Failed to read device address (%d)", err);
+ return err;
+ }
+
+ if (skb->len != sizeof(*bda)) {
+ bt_dev_err(hdev, "Device address length mismatch");
+ kfree_skb(skb);
+ return -EIO;
+ }
+
+ bda = (struct hci_rp_read_bd_addr *)skb->data;
+ if (!bacmp(&bda->bdaddr, QCA_BDADDR_DEFAULT))
+ set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
+
+ kfree_skb(skb);
+
+ return 0;
+}
+
static void qca_generate_hsp_nvm_name(char *fwname, size_t max_size,
struct qca_btsoc_version ver, u8 rom_ver, u16 bid)
{
@@ -818,6 +852,10 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
break;
}
+ err = qca_check_bdaddr(hdev);
+ if (err)
+ return err;
+
bt_dev_info(hdev, "QCA setup on UART is completed");
return 0;
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 06e915b57283..e3946f7b736e 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -542,6 +542,8 @@ static const struct usb_device_id quirks_table[] = {
/* Realtek 8852BE Bluetooth devices */
{ USB_DEVICE(0x0cb8, 0xc559), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0bda, 0x4853), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0bda, 0x887b), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0bda, 0xb85b), .driver_info = BTUSB_REALTEK |
@@ -3480,13 +3482,12 @@ static void btusb_dump_hdr_qca(struct hci_dev *hdev, struct sk_buff *skb)
static void btusb_coredump_qca(struct hci_dev *hdev)
{
+ int err;
static const u8 param[] = { 0x26 };
- struct sk_buff *skb;
- skb = __hci_cmd_sync(hdev, 0xfc0c, 1, param, HCI_CMD_TIMEOUT);
- if (IS_ERR(skb))
- bt_dev_err(hdev, "%s: triggle crash failed (%ld)", __func__, PTR_ERR(skb));
- kfree_skb(skb);
+ err = __hci_cmd_send(hdev, 0xfc0c, 1, param);
+ if (err < 0)
+ bt_dev_err(hdev, "%s: triggle crash failed (%d)", __func__, err);
}
/*
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index ecbc52eaf101..0c9c9ee56592 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -1672,6 +1672,9 @@ static bool qca_wakeup(struct hci_dev *hdev)
struct hci_uart *hu = hci_get_drvdata(hdev);
bool wakeup;
+ if (!hu->serdev)
+ return true;
+
/* BT SoC attached through the serial bus is handled by the serdev driver.
* So we need to use the device handle of the serdev driver to get the
* status of device may wakeup.
@@ -1905,8 +1908,6 @@ retry:
case QCA_WCN6750:
case QCA_WCN6855:
case QCA_WCN7850:
- set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
-
qcadev = serdev_device_get_drvdata(hu->serdev);
if (qcadev->bdaddr_property_broken)
set_bit(HCI_QUIRK_BDADDR_PROPERTY_BROKEN, &hdev->quirks);
@@ -1957,8 +1958,10 @@ retry:
qca_debugfs_init(hdev);
hu->hdev->hw_error = qca_hw_error;
hu->hdev->cmd_timeout = qca_cmd_timeout;
- if (device_can_wakeup(hu->serdev->ctrl->dev.parent))
- hu->hdev->wakeup = qca_wakeup;
+ if (hu->serdev) {
+ if (device_can_wakeup(hu->serdev->ctrl->dev.parent))
+ hu->hdev->wakeup = qca_wakeup;
+ }
} else if (ret == -ENOENT) {
/* No patch/nvm-config found, run with original fw/config */
set_bit(QCA_ROM_FW, &qca->flags);
@@ -2329,16 +2332,21 @@ static int qca_serdev_probe(struct serdev_device *serdev)
(data->soc_type == QCA_WCN6750 ||
data->soc_type == QCA_WCN6855)) {
dev_err(&serdev->dev, "failed to acquire BT_EN gpio\n");
- power_ctrl_enabled = false;
+ return PTR_ERR(qcadev->bt_en);
}
+ if (!qcadev->bt_en)
+ power_ctrl_enabled = false;
+
qcadev->sw_ctrl = devm_gpiod_get_optional(&serdev->dev, "swctrl",
GPIOD_IN);
if (IS_ERR(qcadev->sw_ctrl) &&
(data->soc_type == QCA_WCN6750 ||
data->soc_type == QCA_WCN6855 ||
- data->soc_type == QCA_WCN7850))
- dev_warn(&serdev->dev, "failed to acquire SW_CTRL gpio\n");
+ data->soc_type == QCA_WCN7850)) {
+ dev_err(&serdev->dev, "failed to acquire SW_CTRL gpio\n");
+ return PTR_ERR(qcadev->sw_ctrl);
+ }
qcadev->susclk = devm_clk_get_optional(&serdev->dev, NULL);
if (IS_ERR(qcadev->susclk)) {
@@ -2357,10 +2365,13 @@ static int qca_serdev_probe(struct serdev_device *serdev)
qcadev->bt_en = devm_gpiod_get_optional(&serdev->dev, "enable",
GPIOD_OUT_LOW);
if (IS_ERR(qcadev->bt_en)) {
- dev_warn(&serdev->dev, "failed to acquire enable gpio\n");
- power_ctrl_enabled = false;
+ dev_err(&serdev->dev, "failed to acquire enable gpio\n");
+ return PTR_ERR(qcadev->bt_en);
}
+ if (!qcadev->bt_en)
+ power_ctrl_enabled = false;
+
qcadev->susclk = devm_clk_get_optional(&serdev->dev, NULL);
if (IS_ERR(qcadev->susclk)) {
dev_warn(&serdev->dev, "failed to acquire clk\n");
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 25371c91a58f..8cca52be993f 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -37,6 +37,10 @@ static HLIST_HEAD(clk_root_list);
static HLIST_HEAD(clk_orphan_list);
static LIST_HEAD(clk_notifier_list);
+/* List of registered clks that use runtime PM */
+static HLIST_HEAD(clk_rpm_list);
+static DEFINE_MUTEX(clk_rpm_list_lock);
+
static const struct hlist_head *all_lists[] = {
&clk_root_list,
&clk_orphan_list,
@@ -59,6 +63,7 @@ struct clk_core {
struct clk_hw *hw;
struct module *owner;
struct device *dev;
+ struct hlist_node rpm_node;
struct device_node *of_node;
struct clk_core *parent;
struct clk_parent_map *parents;
@@ -122,6 +127,89 @@ static void clk_pm_runtime_put(struct clk_core *core)
pm_runtime_put_sync(core->dev);
}
+/**
+ * clk_pm_runtime_get_all() - Runtime "get" all clk provider devices
+ *
+ * Call clk_pm_runtime_get() on all runtime PM enabled clks in the clk tree so
+ * that disabling unused clks avoids a deadlock where a device is runtime PM
+ * resuming/suspending and the runtime PM callback is trying to grab the
+ * prepare_lock for something like clk_prepare_enable() while
+ * clk_disable_unused_subtree() holds the prepare_lock and is trying to runtime
+ * PM resume/suspend the device as well.
+ *
+ * Context: Acquires the 'clk_rpm_list_lock' and returns with the lock held on
+ * success. Otherwise the lock is released on failure.
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+static int clk_pm_runtime_get_all(void)
+{
+ int ret;
+ struct clk_core *core, *failed;
+
+ /*
+ * Grab the list lock to prevent any new clks from being registered
+ * or unregistered until clk_pm_runtime_put_all().
+ */
+ mutex_lock(&clk_rpm_list_lock);
+
+ /*
+ * Runtime PM "get" all the devices that are needed for the clks
+ * currently registered. Do this without holding the prepare_lock, to
+ * avoid the deadlock.
+ */
+ hlist_for_each_entry(core, &clk_rpm_list, rpm_node) {
+ ret = clk_pm_runtime_get(core);
+ if (ret) {
+ failed = core;
+ pr_err("clk: Failed to runtime PM get '%s' for clk '%s'\n",
+ dev_name(failed->dev), failed->name);
+ goto err;
+ }
+ }
+
+ return 0;
+
+err:
+ hlist_for_each_entry(core, &clk_rpm_list, rpm_node) {
+ if (core == failed)
+ break;
+
+ clk_pm_runtime_put(core);
+ }
+ mutex_unlock(&clk_rpm_list_lock);
+
+ return ret;
+}
+
+/**
+ * clk_pm_runtime_put_all() - Runtime "put" all clk provider devices
+ *
+ * Put the runtime PM references taken in clk_pm_runtime_get_all() and release
+ * the 'clk_rpm_list_lock'.
+ */
+static void clk_pm_runtime_put_all(void)
+{
+ struct clk_core *core;
+
+ hlist_for_each_entry(core, &clk_rpm_list, rpm_node)
+ clk_pm_runtime_put(core);
+ mutex_unlock(&clk_rpm_list_lock);
+}
+
+static void clk_pm_runtime_init(struct clk_core *core)
+{
+ struct device *dev = core->dev;
+
+ if (dev && pm_runtime_enabled(dev)) {
+ core->rpm_enabled = true;
+
+ mutex_lock(&clk_rpm_list_lock);
+ hlist_add_head(&core->rpm_node, &clk_rpm_list);
+ mutex_unlock(&clk_rpm_list_lock);
+ }
+}
+
/*** locking ***/
static void clk_prepare_lock(void)
{
@@ -1381,9 +1469,6 @@ static void __init clk_unprepare_unused_subtree(struct clk_core *core)
if (core->flags & CLK_IGNORE_UNUSED)
return;
- if (clk_pm_runtime_get(core))
- return;
-
if (clk_core_is_prepared(core)) {
trace_clk_unprepare(core);
if (core->ops->unprepare_unused)
@@ -1392,8 +1477,6 @@ static void __init clk_unprepare_unused_subtree(struct clk_core *core)
core->ops->unprepare(core->hw);
trace_clk_unprepare_complete(core);
}
-
- clk_pm_runtime_put(core);
}
static void __init clk_disable_unused_subtree(struct clk_core *core)
@@ -1409,9 +1492,6 @@ static void __init clk_disable_unused_subtree(struct clk_core *core)
if (core->flags & CLK_OPS_PARENT_ENABLE)
clk_core_prepare_enable(core->parent);
- if (clk_pm_runtime_get(core))
- goto unprepare_out;
-
flags = clk_enable_lock();
if (core->enable_count)
@@ -1436,8 +1516,6 @@ static void __init clk_disable_unused_subtree(struct clk_core *core)
unlock_out:
clk_enable_unlock(flags);
- clk_pm_runtime_put(core);
-unprepare_out:
if (core->flags & CLK_OPS_PARENT_ENABLE)
clk_core_disable_unprepare(core->parent);
}
@@ -1453,6 +1531,7 @@ __setup("clk_ignore_unused", clk_ignore_unused_setup);
static int __init clk_disable_unused(void)
{
struct clk_core *core;
+ int ret;
if (clk_ignore_unused) {
pr_warn("clk: Not disabling unused clocks\n");
@@ -1461,6 +1540,13 @@ static int __init clk_disable_unused(void)
pr_info("clk: Disabling unused clocks\n");
+ ret = clk_pm_runtime_get_all();
+ if (ret)
+ return ret;
+ /*
+ * Grab the prepare lock to keep the clk topology stable while iterating
+ * over clks.
+ */
clk_prepare_lock();
hlist_for_each_entry(core, &clk_root_list, child_node)
@@ -1477,6 +1563,8 @@ static int __init clk_disable_unused(void)
clk_prepare_unlock();
+ clk_pm_runtime_put_all();
+
return 0;
}
late_initcall_sync(clk_disable_unused);
@@ -3252,9 +3340,7 @@ static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
{
struct clk_core *child;
- clk_pm_runtime_get(c);
clk_summary_show_one(s, c, level);
- clk_pm_runtime_put(c);
hlist_for_each_entry(child, &c->children, child_node)
clk_summary_show_subtree(s, child, level + 1);
@@ -3264,11 +3350,15 @@ static int clk_summary_show(struct seq_file *s, void *data)
{
struct clk_core *c;
struct hlist_head **lists = s->private;
+ int ret;
seq_puts(s, " enable prepare protect duty hardware connection\n");
seq_puts(s, " clock count count count rate accuracy phase cycle enable consumer id\n");
seq_puts(s, "---------------------------------------------------------------------------------------------------------------------------------------------\n");
+ ret = clk_pm_runtime_get_all();
+ if (ret)
+ return ret;
clk_prepare_lock();
@@ -3277,6 +3367,7 @@ static int clk_summary_show(struct seq_file *s, void *data)
clk_summary_show_subtree(s, c, 0);
clk_prepare_unlock();
+ clk_pm_runtime_put_all();
return 0;
}
@@ -3324,8 +3415,14 @@ static int clk_dump_show(struct seq_file *s, void *data)
struct clk_core *c;
bool first_node = true;
struct hlist_head **lists = s->private;
+ int ret;
+
+ ret = clk_pm_runtime_get_all();
+ if (ret)
+ return ret;
seq_putc(s, '{');
+
clk_prepare_lock();
for (; *lists; lists++) {
@@ -3338,6 +3435,7 @@ static int clk_dump_show(struct seq_file *s, void *data)
}
clk_prepare_unlock();
+ clk_pm_runtime_put_all();
seq_puts(s, "}\n");
return 0;
@@ -3981,8 +4079,6 @@ static int __clk_core_init(struct clk_core *core)
}
clk_core_reparent_orphans_nolock();
-
- kref_init(&core->ref);
out:
clk_pm_runtime_put(core);
unlock:
@@ -4211,6 +4307,22 @@ static void clk_core_free_parent_map(struct clk_core *core)
kfree(core->parents);
}
+/* Free memory allocated for a struct clk_core */
+static void __clk_release(struct kref *ref)
+{
+ struct clk_core *core = container_of(ref, struct clk_core, ref);
+
+ if (core->rpm_enabled) {
+ mutex_lock(&clk_rpm_list_lock);
+ hlist_del(&core->rpm_node);
+ mutex_unlock(&clk_rpm_list_lock);
+ }
+
+ clk_core_free_parent_map(core);
+ kfree_const(core->name);
+ kfree(core);
+}
+
static struct clk *
__clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
{
@@ -4231,6 +4343,8 @@ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
goto fail_out;
}
+ kref_init(&core->ref);
+
core->name = kstrdup_const(init->name, GFP_KERNEL);
if (!core->name) {
ret = -ENOMEM;
@@ -4243,9 +4357,8 @@ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
}
core->ops = init->ops;
- if (dev && pm_runtime_enabled(dev))
- core->rpm_enabled = true;
core->dev = dev;
+ clk_pm_runtime_init(core);
core->of_node = np;
if (dev && dev->driver)
core->owner = dev->driver->owner;
@@ -4285,12 +4398,10 @@ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
hw->clk = NULL;
fail_create_clk:
- clk_core_free_parent_map(core);
fail_parents:
fail_ops:
- kfree_const(core->name);
fail_name:
- kfree(core);
+ kref_put(&core->ref, __clk_release);
fail_out:
return ERR_PTR(ret);
}
@@ -4370,18 +4481,6 @@ int of_clk_hw_register(struct device_node *node, struct clk_hw *hw)
}
EXPORT_SYMBOL_GPL(of_clk_hw_register);
-/* Free memory allocated for a clock. */
-static void __clk_release(struct kref *ref)
-{
- struct clk_core *core = container_of(ref, struct clk_core, ref);
-
- lockdep_assert_held(&prepare_lock);
-
- clk_core_free_parent_map(core);
- kfree_const(core->name);
- kfree(core);
-}
-
/*
* Empty clk_ops for unregistered clocks. These are used temporarily
* after clk_unregister() was called on a clock and until last clock
@@ -4472,7 +4571,8 @@ void clk_unregister(struct clk *clk)
if (ops == &clk_nodrv_ops) {
pr_err("%s: unregistered clock: %s\n", __func__,
clk->core->name);
- goto unlock;
+ clk_prepare_unlock();
+ return;
}
/*
* Assign empty clock ops for consumers that might still hold
@@ -4506,11 +4606,10 @@ void clk_unregister(struct clk *clk)
if (clk->core->protect_count)
pr_warn("%s: unregistering protected clock: %s\n",
__func__, clk->core->name);
+ clk_prepare_unlock();
kref_put(&clk->core->ref, __clk_release);
free_clk(clk);
-unlock:
- clk_prepare_unlock();
}
EXPORT_SYMBOL_GPL(clk_unregister);
@@ -4669,13 +4768,11 @@ void __clk_put(struct clk *clk)
if (clk->min_rate > 0 || clk->max_rate < ULONG_MAX)
clk_set_rate_range_nolock(clk, 0, ULONG_MAX);
- owner = clk->core->owner;
- kref_put(&clk->core->ref, __clk_release);
-
clk_prepare_unlock();
+ owner = clk->core->owner;
+ kref_put(&clk->core->ref, __clk_release);
module_put(owner);
-
free_clk(clk);
}
diff --git a/drivers/clk/mediatek/clk-mt7988-infracfg.c b/drivers/clk/mediatek/clk-mt7988-infracfg.c
index 449041f8abbc..c8c023afe3e5 100644
--- a/drivers/clk/mediatek/clk-mt7988-infracfg.c
+++ b/drivers/clk/mediatek/clk-mt7988-infracfg.c
@@ -156,7 +156,7 @@ static const struct mtk_gate infra_clks[] = {
GATE_INFRA0(CLK_INFRA_PCIE_PERI_26M_CK_P1, "infra_pcie_peri_ck_26m_ck_p1",
"csw_infra_f26m_sel", 8),
GATE_INFRA0(CLK_INFRA_PCIE_PERI_26M_CK_P2, "infra_pcie_peri_ck_26m_ck_p2",
- "csw_infra_f26m_sel", 9),
+ "infra_pcie_peri_ck_26m_ck_p3", 9),
GATE_INFRA0(CLK_INFRA_PCIE_PERI_26M_CK_P3, "infra_pcie_peri_ck_26m_ck_p3",
"csw_infra_f26m_sel", 10),
/* INFRA1 */
diff --git a/drivers/clk/mediatek/clk-mtk.c b/drivers/clk/mediatek/clk-mtk.c
index 2e55368dc4d8..bd37ab4d1a9b 100644
--- a/drivers/clk/mediatek/clk-mtk.c
+++ b/drivers/clk/mediatek/clk-mtk.c
@@ -13,6 +13,7 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include "clk-mtk.h"
@@ -494,6 +495,16 @@ static int __mtk_clk_simple_probe(struct platform_device *pdev,
return IS_ERR(base) ? PTR_ERR(base) : -ENOMEM;
}
+
+ devm_pm_runtime_enable(&pdev->dev);
+ /*
+ * Do a pm_runtime_resume_and_get() to workaround a possible
+ * deadlock between clk_register() and the genpd framework.
+ */
+ r = pm_runtime_resume_and_get(&pdev->dev);
+ if (r)
+ return r;
+
/* Calculate how many clk_hw_onecell_data entries to allocate */
num_clks = mcd->num_clks + mcd->num_composite_clks;
num_clks += mcd->num_fixed_clks + mcd->num_factor_clks;
@@ -574,6 +585,8 @@ static int __mtk_clk_simple_probe(struct platform_device *pdev,
goto unregister_clks;
}
+ pm_runtime_put(&pdev->dev);
+
return r;
unregister_clks:
@@ -604,6 +617,8 @@ free_data:
free_base:
if (mcd->shared_io && base)
iounmap(base);
+
+ pm_runtime_put(&pdev->dev);
return r;
}
diff --git a/drivers/comedi/drivers/vmk80xx.c b/drivers/comedi/drivers/vmk80xx.c
index 4536ed43f65b..84dce5184a77 100644
--- a/drivers/comedi/drivers/vmk80xx.c
+++ b/drivers/comedi/drivers/vmk80xx.c
@@ -641,33 +641,22 @@ static int vmk80xx_find_usb_endpoints(struct comedi_device *dev)
struct vmk80xx_private *devpriv = dev->private;
struct usb_interface *intf = comedi_to_usb_interface(dev);
struct usb_host_interface *iface_desc = intf->cur_altsetting;
- struct usb_endpoint_descriptor *ep_desc;
- int i;
-
- if (iface_desc->desc.bNumEndpoints != 2)
- return -ENODEV;
-
- for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) {
- ep_desc = &iface_desc->endpoint[i].desc;
-
- if (usb_endpoint_is_int_in(ep_desc) ||
- usb_endpoint_is_bulk_in(ep_desc)) {
- if (!devpriv->ep_rx)
- devpriv->ep_rx = ep_desc;
- continue;
- }
+ struct usb_endpoint_descriptor *ep_rx_desc, *ep_tx_desc;
+ int ret;
- if (usb_endpoint_is_int_out(ep_desc) ||
- usb_endpoint_is_bulk_out(ep_desc)) {
- if (!devpriv->ep_tx)
- devpriv->ep_tx = ep_desc;
- continue;
- }
- }
+ if (devpriv->model == VMK8061_MODEL)
+ ret = usb_find_common_endpoints(iface_desc, &ep_rx_desc,
+ &ep_tx_desc, NULL, NULL);
+ else
+ ret = usb_find_common_endpoints(iface_desc, NULL, NULL,
+ &ep_rx_desc, &ep_tx_desc);
- if (!devpriv->ep_rx || !devpriv->ep_tx)
+ if (ret)
return -ENODEV;
+ devpriv->ep_rx = ep_rx_desc;
+ devpriv->ep_tx = ep_tx_desc;
+
if (!usb_endpoint_maxp(devpriv->ep_rx) || !usb_endpoint_maxp(devpriv->ep_tx))
return -EINVAL;
diff --git a/drivers/dpll/dpll_core.c b/drivers/dpll/dpll_core.c
index 64eaca80d736..d0f6693ca142 100644
--- a/drivers/dpll/dpll_core.c
+++ b/drivers/dpll/dpll_core.c
@@ -42,6 +42,7 @@ struct dpll_pin_registration {
struct list_head list;
const struct dpll_pin_ops *ops;
void *priv;
+ void *cookie;
};
struct dpll_device *dpll_device_get_by_id(int id)
@@ -54,12 +55,14 @@ struct dpll_device *dpll_device_get_by_id(int id)
static struct dpll_pin_registration *
dpll_pin_registration_find(struct dpll_pin_ref *ref,
- const struct dpll_pin_ops *ops, void *priv)
+ const struct dpll_pin_ops *ops, void *priv,
+ void *cookie)
{
struct dpll_pin_registration *reg;
list_for_each_entry(reg, &ref->registration_list, list) {
- if (reg->ops == ops && reg->priv == priv)
+ if (reg->ops == ops && reg->priv == priv &&
+ reg->cookie == cookie)
return reg;
}
return NULL;
@@ -67,7 +70,8 @@ dpll_pin_registration_find(struct dpll_pin_ref *ref,
static int
dpll_xa_ref_pin_add(struct xarray *xa_pins, struct dpll_pin *pin,
- const struct dpll_pin_ops *ops, void *priv)
+ const struct dpll_pin_ops *ops, void *priv,
+ void *cookie)
{
struct dpll_pin_registration *reg;
struct dpll_pin_ref *ref;
@@ -78,7 +82,7 @@ dpll_xa_ref_pin_add(struct xarray *xa_pins, struct dpll_pin *pin,
xa_for_each(xa_pins, i, ref) {
if (ref->pin != pin)
continue;
- reg = dpll_pin_registration_find(ref, ops, priv);
+ reg = dpll_pin_registration_find(ref, ops, priv, cookie);
if (reg) {
refcount_inc(&ref->refcount);
return 0;
@@ -111,6 +115,7 @@ dpll_xa_ref_pin_add(struct xarray *xa_pins, struct dpll_pin *pin,
}
reg->ops = ops;
reg->priv = priv;
+ reg->cookie = cookie;
if (ref_exists)
refcount_inc(&ref->refcount);
list_add_tail(&reg->list, &ref->registration_list);
@@ -119,7 +124,8 @@ dpll_xa_ref_pin_add(struct xarray *xa_pins, struct dpll_pin *pin,
}
static int dpll_xa_ref_pin_del(struct xarray *xa_pins, struct dpll_pin *pin,
- const struct dpll_pin_ops *ops, void *priv)
+ const struct dpll_pin_ops *ops, void *priv,
+ void *cookie)
{
struct dpll_pin_registration *reg;
struct dpll_pin_ref *ref;
@@ -128,7 +134,7 @@ static int dpll_xa_ref_pin_del(struct xarray *xa_pins, struct dpll_pin *pin,
xa_for_each(xa_pins, i, ref) {
if (ref->pin != pin)
continue;
- reg = dpll_pin_registration_find(ref, ops, priv);
+ reg = dpll_pin_registration_find(ref, ops, priv, cookie);
if (WARN_ON(!reg))
return -EINVAL;
list_del(&reg->list);
@@ -146,7 +152,7 @@ static int dpll_xa_ref_pin_del(struct xarray *xa_pins, struct dpll_pin *pin,
static int
dpll_xa_ref_dpll_add(struct xarray *xa_dplls, struct dpll_device *dpll,
- const struct dpll_pin_ops *ops, void *priv)
+ const struct dpll_pin_ops *ops, void *priv, void *cookie)
{
struct dpll_pin_registration *reg;
struct dpll_pin_ref *ref;
@@ -157,7 +163,7 @@ dpll_xa_ref_dpll_add(struct xarray *xa_dplls, struct dpll_device *dpll,
xa_for_each(xa_dplls, i, ref) {
if (ref->dpll != dpll)
continue;
- reg = dpll_pin_registration_find(ref, ops, priv);
+ reg = dpll_pin_registration_find(ref, ops, priv, cookie);
if (reg) {
refcount_inc(&ref->refcount);
return 0;
@@ -190,6 +196,7 @@ dpll_xa_ref_dpll_add(struct xarray *xa_dplls, struct dpll_device *dpll,
}
reg->ops = ops;
reg->priv = priv;
+ reg->cookie = cookie;
if (ref_exists)
refcount_inc(&ref->refcount);
list_add_tail(&reg->list, &ref->registration_list);
@@ -199,7 +206,7 @@ dpll_xa_ref_dpll_add(struct xarray *xa_dplls, struct dpll_device *dpll,
static void
dpll_xa_ref_dpll_del(struct xarray *xa_dplls, struct dpll_device *dpll,
- const struct dpll_pin_ops *ops, void *priv)
+ const struct dpll_pin_ops *ops, void *priv, void *cookie)
{
struct dpll_pin_registration *reg;
struct dpll_pin_ref *ref;
@@ -208,7 +215,7 @@ dpll_xa_ref_dpll_del(struct xarray *xa_dplls, struct dpll_device *dpll,
xa_for_each(xa_dplls, i, ref) {
if (ref->dpll != dpll)
continue;
- reg = dpll_pin_registration_find(ref, ops, priv);
+ reg = dpll_pin_registration_find(ref, ops, priv, cookie);
if (WARN_ON(!reg))
return;
list_del(&reg->list);
@@ -594,14 +601,14 @@ EXPORT_SYMBOL_GPL(dpll_pin_put);
static int
__dpll_pin_register(struct dpll_device *dpll, struct dpll_pin *pin,
- const struct dpll_pin_ops *ops, void *priv)
+ const struct dpll_pin_ops *ops, void *priv, void *cookie)
{
int ret;
- ret = dpll_xa_ref_pin_add(&dpll->pin_refs, pin, ops, priv);
+ ret = dpll_xa_ref_pin_add(&dpll->pin_refs, pin, ops, priv, cookie);
if (ret)
return ret;
- ret = dpll_xa_ref_dpll_add(&pin->dpll_refs, dpll, ops, priv);
+ ret = dpll_xa_ref_dpll_add(&pin->dpll_refs, dpll, ops, priv, cookie);
if (ret)
goto ref_pin_del;
xa_set_mark(&dpll_pin_xa, pin->id, DPLL_REGISTERED);
@@ -610,7 +617,7 @@ __dpll_pin_register(struct dpll_device *dpll, struct dpll_pin *pin,
return ret;
ref_pin_del:
- dpll_xa_ref_pin_del(&dpll->pin_refs, pin, ops, priv);
+ dpll_xa_ref_pin_del(&dpll->pin_refs, pin, ops, priv, cookie);
return ret;
}
@@ -642,7 +649,7 @@ dpll_pin_register(struct dpll_device *dpll, struct dpll_pin *pin,
dpll->clock_id == pin->clock_id)))
ret = -EINVAL;
else
- ret = __dpll_pin_register(dpll, pin, ops, priv);
+ ret = __dpll_pin_register(dpll, pin, ops, priv, NULL);
mutex_unlock(&dpll_lock);
return ret;
@@ -651,11 +658,11 @@ EXPORT_SYMBOL_GPL(dpll_pin_register);
static void
__dpll_pin_unregister(struct dpll_device *dpll, struct dpll_pin *pin,
- const struct dpll_pin_ops *ops, void *priv)
+ const struct dpll_pin_ops *ops, void *priv, void *cookie)
{
ASSERT_DPLL_PIN_REGISTERED(pin);
- dpll_xa_ref_pin_del(&dpll->pin_refs, pin, ops, priv);
- dpll_xa_ref_dpll_del(&pin->dpll_refs, dpll, ops, priv);
+ dpll_xa_ref_pin_del(&dpll->pin_refs, pin, ops, priv, cookie);
+ dpll_xa_ref_dpll_del(&pin->dpll_refs, dpll, ops, priv, cookie);
if (xa_empty(&pin->dpll_refs))
xa_clear_mark(&dpll_pin_xa, pin->id, DPLL_REGISTERED);
}
@@ -680,7 +687,7 @@ void dpll_pin_unregister(struct dpll_device *dpll, struct dpll_pin *pin,
mutex_lock(&dpll_lock);
dpll_pin_delete_ntf(pin);
- __dpll_pin_unregister(dpll, pin, ops, priv);
+ __dpll_pin_unregister(dpll, pin, ops, priv, NULL);
mutex_unlock(&dpll_lock);
}
EXPORT_SYMBOL_GPL(dpll_pin_unregister);
@@ -716,12 +723,12 @@ int dpll_pin_on_pin_register(struct dpll_pin *parent, struct dpll_pin *pin,
return -EINVAL;
mutex_lock(&dpll_lock);
- ret = dpll_xa_ref_pin_add(&pin->parent_refs, parent, ops, priv);
+ ret = dpll_xa_ref_pin_add(&pin->parent_refs, parent, ops, priv, pin);
if (ret)
goto unlock;
refcount_inc(&pin->refcount);
xa_for_each(&parent->dpll_refs, i, ref) {
- ret = __dpll_pin_register(ref->dpll, pin, ops, priv);
+ ret = __dpll_pin_register(ref->dpll, pin, ops, priv, parent);
if (ret) {
stop = i;
goto dpll_unregister;
@@ -735,11 +742,12 @@ int dpll_pin_on_pin_register(struct dpll_pin *parent, struct dpll_pin *pin,
dpll_unregister:
xa_for_each(&parent->dpll_refs, i, ref)
if (i < stop) {
- __dpll_pin_unregister(ref->dpll, pin, ops, priv);
+ __dpll_pin_unregister(ref->dpll, pin, ops, priv,
+ parent);
dpll_pin_delete_ntf(pin);
}
refcount_dec(&pin->refcount);
- dpll_xa_ref_pin_del(&pin->parent_refs, parent, ops, priv);
+ dpll_xa_ref_pin_del(&pin->parent_refs, parent, ops, priv, pin);
unlock:
mutex_unlock(&dpll_lock);
return ret;
@@ -764,10 +772,10 @@ void dpll_pin_on_pin_unregister(struct dpll_pin *parent, struct dpll_pin *pin,
mutex_lock(&dpll_lock);
dpll_pin_delete_ntf(pin);
- dpll_xa_ref_pin_del(&pin->parent_refs, parent, ops, priv);
+ dpll_xa_ref_pin_del(&pin->parent_refs, parent, ops, priv, pin);
refcount_dec(&pin->refcount);
xa_for_each(&pin->dpll_refs, i, ref)
- __dpll_pin_unregister(ref->dpll, pin, ops, priv);
+ __dpll_pin_unregister(ref->dpll, pin, ops, priv, parent);
mutex_unlock(&dpll_lock);
}
EXPORT_SYMBOL_GPL(dpll_pin_on_pin_unregister);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 0a4b09709cfb..ec888fc6ead8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -819,7 +819,7 @@ retry:
p->bytes_moved += ctx.bytes_moved;
if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
- amdgpu_bo_in_cpu_visible_vram(bo))
+ amdgpu_res_cpu_visible(adev, bo->tbo.resource))
p->bytes_moved_vis += ctx.bytes_moved;
if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 010b0cb7693c..2099159a693f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -617,8 +617,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
return r;
if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
- bo->tbo.resource->mem_type == TTM_PL_VRAM &&
- amdgpu_bo_in_cpu_visible_vram(bo))
+ amdgpu_res_cpu_visible(adev, bo->tbo.resource))
amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
ctx.bytes_moved);
else
@@ -1272,23 +1271,25 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, bool evict)
void amdgpu_bo_get_memory(struct amdgpu_bo *bo,
struct amdgpu_mem_stats *stats)
{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ struct ttm_resource *res = bo->tbo.resource;
uint64_t size = amdgpu_bo_size(bo);
struct drm_gem_object *obj;
unsigned int domain;
bool shared;
/* Abort if the BO doesn't currently have a backing store */
- if (!bo->tbo.resource)
+ if (!res)
return;
obj = &bo->tbo.base;
shared = drm_gem_object_is_shared_for_memory_stats(obj);
- domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
+ domain = amdgpu_mem_type_to_domain(res->mem_type);
switch (domain) {
case AMDGPU_GEM_DOMAIN_VRAM:
stats->vram += size;
- if (amdgpu_bo_in_cpu_visible_vram(bo))
+ if (amdgpu_res_cpu_visible(adev, bo->tbo.resource))
stats->visible_vram += size;
if (shared)
stats->vram_shared += size;
@@ -1389,10 +1390,7 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
/* Remember that this BO was accessed by the CPU */
abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
- if (bo->resource->mem_type != TTM_PL_VRAM)
- return 0;
-
- if (amdgpu_bo_in_cpu_visible_vram(abo))
+ if (amdgpu_res_cpu_visible(adev, bo->resource))
return 0;
/* Can't move a pinned BO to visible VRAM */
@@ -1415,7 +1413,7 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
/* this should never happen */
if (bo->resource->mem_type == TTM_PL_VRAM &&
- !amdgpu_bo_in_cpu_visible_vram(abo))
+ !amdgpu_res_cpu_visible(adev, bo->resource))
return VM_FAULT_SIGBUS;
ttm_bo_move_to_lru_tail_unlocked(bo);
@@ -1579,6 +1577,7 @@ uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev,
*/
u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)
{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct dma_buf_attachment *attachment;
struct dma_buf *dma_buf;
const char *placement;
@@ -1587,10 +1586,11 @@ u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)
if (dma_resv_trylock(bo->tbo.base.resv)) {
unsigned int domain;
+
domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
switch (domain) {
case AMDGPU_GEM_DOMAIN_VRAM:
- if (amdgpu_bo_in_cpu_visible_vram(bo))
+ if (amdgpu_res_cpu_visible(adev, bo->tbo.resource))
placement = "VRAM VISIBLE";
else
placement = "VRAM";
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index be679c42b0b8..fa03d9e4874c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -251,28 +251,6 @@ static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo)
}
/**
- * amdgpu_bo_in_cpu_visible_vram - check if BO is (partly) in visible VRAM
- */
-static inline bool amdgpu_bo_in_cpu_visible_vram(struct amdgpu_bo *bo)
-{
- struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- struct amdgpu_res_cursor cursor;
-
- if (!bo->tbo.resource || bo->tbo.resource->mem_type != TTM_PL_VRAM)
- return false;
-
- amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &cursor);
- while (cursor.remaining) {
- if (cursor.start < adev->gmc.visible_vram_size)
- return true;
-
- amdgpu_res_next(&cursor, cursor.size);
- }
-
- return false;
-}
-
-/**
* amdgpu_bo_explicit_sync - return whether the bo is explicitly synced
*/
static inline bool amdgpu_bo_explicit_sync(struct amdgpu_bo *bo)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index fc418e670fda..1d71729e3f6b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -133,7 +133,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
} else if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
!(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
- amdgpu_bo_in_cpu_visible_vram(abo)) {
+ amdgpu_res_cpu_visible(adev, bo->resource)) {
/* Try evicting to the CPU inaccessible part of VRAM
* first, but only set GTT as busy placement, so this
@@ -403,40 +403,55 @@ error:
return r;
}
-/*
- * amdgpu_mem_visible - Check that memory can be accessed by ttm_bo_move_memcpy
+/**
+ * amdgpu_res_cpu_visible - Check that resource can be accessed by CPU
+ * @adev: amdgpu device
+ * @res: the resource to check
*
- * Called by amdgpu_bo_move()
+ * Returns: true if the full resource is CPU visible, false otherwise.
*/
-static bool amdgpu_mem_visible(struct amdgpu_device *adev,
- struct ttm_resource *mem)
+bool amdgpu_res_cpu_visible(struct amdgpu_device *adev,
+ struct ttm_resource *res)
{
- u64 mem_size = (u64)mem->size;
struct amdgpu_res_cursor cursor;
- u64 end;
- if (mem->mem_type == TTM_PL_SYSTEM ||
- mem->mem_type == TTM_PL_TT)
+ if (!res)
+ return false;
+
+ if (res->mem_type == TTM_PL_SYSTEM || res->mem_type == TTM_PL_TT ||
+ res->mem_type == AMDGPU_PL_PREEMPT)
return true;
- if (mem->mem_type != TTM_PL_VRAM)
+
+ if (res->mem_type != TTM_PL_VRAM)
return false;
- amdgpu_res_first(mem, 0, mem_size, &cursor);
- end = cursor.start + cursor.size;
+ amdgpu_res_first(res, 0, res->size, &cursor);
while (cursor.remaining) {
+ if ((cursor.start + cursor.size) >= adev->gmc.visible_vram_size)
+ return false;
amdgpu_res_next(&cursor, cursor.size);
+ }
- if (!cursor.remaining)
- break;
+ return true;
+}
- /* ttm_resource_ioremap only supports contiguous memory */
- if (end != cursor.start)
- return false;
+/*
+ * amdgpu_res_copyable - Check that memory can be accessed by ttm_bo_move_memcpy
+ *
+ * Called by amdgpu_bo_move()
+ */
+static bool amdgpu_res_copyable(struct amdgpu_device *adev,
+ struct ttm_resource *mem)
+{
+ if (!amdgpu_res_cpu_visible(adev, mem))
+ return false;
- end = cursor.start + cursor.size;
- }
+ /* ttm_resource_ioremap only supports contiguous memory */
+ if (mem->mem_type == TTM_PL_VRAM &&
+ !(mem->placement & TTM_PL_FLAG_CONTIGUOUS))
+ return false;
- return end <= adev->gmc.visible_vram_size;
+ return true;
}
/*
@@ -529,8 +544,8 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
if (r) {
/* Check that all memory is CPU accessible */
- if (!amdgpu_mem_visible(adev, old_mem) ||
- !amdgpu_mem_visible(adev, new_mem)) {
+ if (!amdgpu_res_copyable(adev, old_mem) ||
+ !amdgpu_res_copyable(adev, new_mem)) {
pr_err("Move buffer fallback to memcpy unavailable\n");
return r;
}
@@ -557,7 +572,6 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev,
struct ttm_resource *mem)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
- size_t bus_size = (size_t)mem->size;
switch (mem->mem_type) {
case TTM_PL_SYSTEM:
@@ -568,9 +582,6 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev,
break;
case TTM_PL_VRAM:
mem->bus.offset = mem->start << PAGE_SHIFT;
- /* check if it's visible */
- if ((mem->bus.offset + bus_size) > adev->gmc.visible_vram_size)
- return -EINVAL;
if (adev->mman.aper_base_kaddr &&
mem->placement & TTM_PL_FLAG_CONTIGUOUS)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 65ec82141a8e..32cf6b6f6efd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -139,6 +139,9 @@ int amdgpu_vram_mgr_reserve_range(struct amdgpu_vram_mgr *mgr,
int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr,
uint64_t start);
+bool amdgpu_res_cpu_visible(struct amdgpu_device *adev,
+ struct ttm_resource *res);
+
int amdgpu_ttm_init(struct amdgpu_device *adev);
void amdgpu_ttm_fini(struct amdgpu_device *adev);
void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 4299ce386322..94089069c9ad 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1613,6 +1613,37 @@ static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
trace_amdgpu_vm_bo_map(bo_va, mapping);
}
+/* Validate operation parameters to prevent potential abuse */
+static int amdgpu_vm_verify_parameters(struct amdgpu_device *adev,
+ struct amdgpu_bo *bo,
+ uint64_t saddr,
+ uint64_t offset,
+ uint64_t size)
+{
+ uint64_t tmp, lpfn;
+
+ if (saddr & AMDGPU_GPU_PAGE_MASK
+ || offset & AMDGPU_GPU_PAGE_MASK
+ || size & AMDGPU_GPU_PAGE_MASK)
+ return -EINVAL;
+
+ if (check_add_overflow(saddr, size, &tmp)
+ || check_add_overflow(offset, size, &tmp)
+ || size == 0 /* which also leads to end < begin */)
+ return -EINVAL;
+
+ /* make sure object fit at this offset */
+ if (bo && offset + size > amdgpu_bo_size(bo))
+ return -EINVAL;
+
+ /* Ensure last pfn not exceed max_pfn */
+ lpfn = (saddr + size - 1) >> AMDGPU_GPU_PAGE_SHIFT;
+ if (lpfn >= adev->vm_manager.max_pfn)
+ return -EINVAL;
+
+ return 0;
+}
+
/**
* amdgpu_vm_bo_map - map bo inside a vm
*
@@ -1639,21 +1670,14 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
struct amdgpu_bo *bo = bo_va->base.bo;
struct amdgpu_vm *vm = bo_va->base.vm;
uint64_t eaddr;
+ int r;
- /* validate the parameters */
- if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || size & ~PAGE_MASK)
- return -EINVAL;
- if (saddr + size <= saddr || offset + size <= offset)
- return -EINVAL;
-
- /* make sure object fit at this offset */
- eaddr = saddr + size - 1;
- if ((bo && offset + size > amdgpu_bo_size(bo)) ||
- (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT))
- return -EINVAL;
+ r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
+ if (r)
+ return r;
saddr /= AMDGPU_GPU_PAGE_SIZE;
- eaddr /= AMDGPU_GPU_PAGE_SIZE;
+ eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
if (tmp) {
@@ -1706,17 +1730,9 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
uint64_t eaddr;
int r;
- /* validate the parameters */
- if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || size & ~PAGE_MASK)
- return -EINVAL;
- if (saddr + size <= saddr || offset + size <= offset)
- return -EINVAL;
-
- /* make sure object fit at this offset */
- eaddr = saddr + size - 1;
- if ((bo && offset + size > amdgpu_bo_size(bo)) ||
- (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT))
- return -EINVAL;
+ r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
+ if (r)
+ return r;
/* Allocate all the needed memory */
mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
@@ -1730,7 +1746,7 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
}
saddr /= AMDGPU_GPU_PAGE_SIZE;
- eaddr /= AMDGPU_GPU_PAGE_SIZE;
+ eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
mapping->start = saddr;
mapping->last = eaddr;
@@ -1817,10 +1833,14 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
LIST_HEAD(removed);
uint64_t eaddr;
+ int r;
+
+ r = amdgpu_vm_verify_parameters(adev, NULL, saddr, 0, size);
+ if (r)
+ return r;
- eaddr = saddr + size - 1;
saddr /= AMDGPU_GPU_PAGE_SIZE;
- eaddr /= AMDGPU_GPU_PAGE_SIZE;
+ eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
/* Allocate all the needed memory */
before = kzalloc(sizeof(*before), GFP_KERNEL);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 717a60d7a4ea..b79986412cd8 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -819,9 +819,9 @@ struct kfd_process *kfd_create_process(struct task_struct *thread)
mutex_lock(&kfd_processes_mutex);
if (kfd_is_locked()) {
- mutex_unlock(&kfd_processes_mutex);
pr_debug("KFD is locked! Cannot create process");
- return ERR_PTR(-EINVAL);
+ process = ERR_PTR(-EINVAL);
+ goto out;
}
/* A prior open of /dev/kfd could have already created the process. */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c
index e224a028d68a..8a0460e86309 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c
@@ -248,14 +248,12 @@ void dcn32_link_encoder_construct(
enc10->base.hpd_source = init_data->hpd_source;
enc10->base.connector = init_data->connector;
- enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;
-
- enc10->base.features = *enc_features;
if (enc10->base.connector.id == CONNECTOR_ID_USBC)
enc10->base.features.flags.bits.DP_IS_USB_C = 1;
- if (enc10->base.connector.id == CONNECTOR_ID_USBC)
- enc10->base.features.flags.bits.DP_IS_USB_C = 1;
+ enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;
+
+ enc10->base.features = *enc_features;
enc10->base.transmitter = init_data->transmitter;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c
index 81e349d5835b..da94e5309fba 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c
@@ -184,6 +184,8 @@ void dcn35_link_encoder_construct(
enc10->base.hpd_source = init_data->hpd_source;
enc10->base.connector = init_data->connector;
+ if (enc10->base.connector.id == CONNECTOR_ID_USBC)
+ enc10->base.features.flags.bits.DP_IS_USB_C = 1;
enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;
@@ -238,8 +240,6 @@ void dcn35_link_encoder_construct(
}
enc10->base.features.flags.bits.HDMI_6GB_EN = 1;
- if (enc10->base.connector.id == CONNECTOR_ID_USBC)
- enc10->base.features.flags.bits.DP_IS_USB_C = 1;
if (bp_funcs->get_connector_speed_cap_info)
result = bp_funcs->get_connector_speed_cap_info(enc10->base.ctx->dc_bios,
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 479effcf607e..79cfab53f80e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -23,6 +23,7 @@
*/
#include "nouveau_drv.h"
+#include "nouveau_bios.h"
#include "nouveau_reg.h"
#include "dispnv04/hw.h"
#include "nouveau_encoder.h"
@@ -1677,7 +1678,7 @@ apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf)
*/
if (nv_match_device(dev, 0x0201, 0x1462, 0x8851)) {
if (*conn == 0xf2005014 && *conf == 0xffffffff) {
- fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 1, 1, 1);
+ fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 1, 1, DCB_OUTPUT_B);
return false;
}
}
@@ -1763,26 +1764,26 @@ fabricate_dcb_encoder_table(struct drm_device *dev, struct nvbios *bios)
#ifdef __powerpc__
/* Apple iMac G4 NV17 */
if (of_machine_is_compatible("PowerMac4,5")) {
- fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 0, all_heads, 1);
- fabricate_dcb_output(dcb, DCB_OUTPUT_ANALOG, 1, all_heads, 2);
+ fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 0, all_heads, DCB_OUTPUT_B);
+ fabricate_dcb_output(dcb, DCB_OUTPUT_ANALOG, 1, all_heads, DCB_OUTPUT_C);
return;
}
#endif
/* Make up some sane defaults */
fabricate_dcb_output(dcb, DCB_OUTPUT_ANALOG,
- bios->legacy.i2c_indices.crt, 1, 1);
+ bios->legacy.i2c_indices.crt, 1, DCB_OUTPUT_B);
if (nv04_tv_identify(dev, bios->legacy.i2c_indices.tv) >= 0)
fabricate_dcb_output(dcb, DCB_OUTPUT_TV,
bios->legacy.i2c_indices.tv,
- all_heads, 0);
+ all_heads, DCB_OUTPUT_A);
else if (bios->tmds.output0_script_ptr ||
bios->tmds.output1_script_ptr)
fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS,
bios->legacy.i2c_indices.panel,
- all_heads, 1);
+ all_heads, DCB_OUTPUT_B);
}
static int
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index 7de7707ec6a8..a72c45809484 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -225,12 +225,18 @@ nouveau_dp_detect(struct nouveau_connector *nv_connector,
u8 *dpcd = nv_encoder->dp.dpcd;
int ret = NOUVEAU_DP_NONE, hpd;
- /* If we've already read the DPCD on an eDP device, we don't need to
- * reread it as it won't change
+ /* eDP ports don't support hotplugging - so there's no point in probing eDP ports unless we
+ * haven't probed them once before.
*/
- if (connector->connector_type == DRM_MODE_CONNECTOR_eDP &&
- dpcd[DP_DPCD_REV] != 0)
- return NOUVEAU_DP_SST;
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+ if (connector->status == connector_status_connected)
+ return NOUVEAU_DP_SST;
+ else if (connector->status == connector_status_disconnected)
+ return NOUVEAU_DP_NONE;
+ }
+
+ // Ensure that the aux bus is enabled for probing
+ drm_dp_dpcd_set_powered(&nv_connector->aux, true);
mutex_lock(&nv_encoder->dp.hpd_irq_lock);
if (mstm) {
@@ -293,6 +299,13 @@ out:
if (mstm && !mstm->suspended && ret != NOUVEAU_DP_MST)
nv50_mstm_remove(mstm);
+ /* GSP doesn't like when we try to do aux transactions on a port it considers disconnected,
+ * and since we don't really have a usecase for that anyway - just disable the aux bus here
+ * if we've decided the connector is disconnected
+ */
+ if (ret == NOUVEAU_DP_NONE)
+ drm_dp_dpcd_set_powered(&nv_connector->aux, false);
+
mutex_unlock(&nv_encoder->dp.hpd_irq_lock);
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
index a7f3fc342d87..dd5b5a17ece0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
@@ -222,8 +222,11 @@ nv50_instobj_acquire(struct nvkm_memory *memory)
void __iomem *map = NULL;
/* Already mapped? */
- if (refcount_inc_not_zero(&iobj->maps))
+ if (refcount_inc_not_zero(&iobj->maps)) {
+ /* read barrier match the wmb on refcount set */
+ smp_rmb();
return iobj->map;
+ }
/* Take the lock, and re-check that another thread hasn't
* already mapped the object in the meantime.
@@ -250,6 +253,8 @@ nv50_instobj_acquire(struct nvkm_memory *memory)
iobj->base.memory.ptrs = &nv50_instobj_fast;
else
iobj->base.memory.ptrs = &nv50_instobj_slow;
+ /* barrier to ensure the ptrs are written before refcount is set */
+ smp_wmb();
refcount_set(&iobj->maps, 1);
}
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt36672e.c b/drivers/gpu/drm/panel/panel-novatek-nt36672e.c
index cb7406d74466..c39fe0fc5d69 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt36672e.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt36672e.c
@@ -614,8 +614,6 @@ static void nt36672e_panel_remove(struct mipi_dsi_device *dsi)
struct nt36672e_panel *ctx = mipi_dsi_get_drvdata(dsi);
mipi_dsi_detach(ctx->dsi);
- mipi_dsi_device_unregister(ctx->dsi);
-
drm_panel_remove(&ctx->panel);
}
diff --git a/drivers/gpu/drm/panel/panel-visionox-rm69299.c b/drivers/gpu/drm/panel/panel-visionox-rm69299.c
index 775144695283..b15ca56a09a7 100644
--- a/drivers/gpu/drm/panel/panel-visionox-rm69299.c
+++ b/drivers/gpu/drm/panel/panel-visionox-rm69299.c
@@ -253,8 +253,6 @@ static void visionox_rm69299_remove(struct mipi_dsi_device *dsi)
struct visionox_rm69299 *ctx = mipi_dsi_get_drvdata(dsi);
mipi_dsi_detach(ctx->dsi);
- mipi_dsi_device_unregister(ctx->dsi);
-
drm_panel_remove(&ctx->panel);
}
diff --git a/drivers/gpu/drm/radeon/pptable.h b/drivers/gpu/drm/radeon/pptable.h
index 94947229888b..b7f22597ee95 100644
--- a/drivers/gpu/drm/radeon/pptable.h
+++ b/drivers/gpu/drm/radeon/pptable.h
@@ -424,7 +424,7 @@ typedef struct _ATOM_PPLIB_SUMO_CLOCK_INFO{
typedef struct _ATOM_PPLIB_STATE_V2
{
//number of valid dpm levels in this state; Driver uses it to calculate the whole
- //size of the state: sizeof(ATOM_PPLIB_STATE_V2) + (ucNumDPMLevels - 1) * sizeof(UCHAR)
+ //size of the state: struct_size(ATOM_PPLIB_STATE_V2, clockInfoIndex, ucNumDPMLevels)
UCHAR ucNumDPMLevels;
//a index to the array of nonClockInfos
@@ -432,14 +432,14 @@ typedef struct _ATOM_PPLIB_STATE_V2
/**
* Driver will read the first ucNumDPMLevels in this array
*/
- UCHAR clockInfoIndex[1];
+ UCHAR clockInfoIndex[] __counted_by(ucNumDPMLevels);
} ATOM_PPLIB_STATE_V2;
typedef struct _StateArray{
//how many states we have
UCHAR ucNumEntries;
- ATOM_PPLIB_STATE_V2 states[1];
+ ATOM_PPLIB_STATE_V2 states[] __counted_by(ucNumEntries);
}StateArray;
@@ -450,7 +450,7 @@ typedef struct _ClockInfoArray{
//sizeof(ATOM_PPLIB_CLOCK_INFO)
UCHAR ucEntrySize;
- UCHAR clockInfo[1];
+ UCHAR clockInfo[] __counted_by(ucNumEntries);
}ClockInfoArray;
typedef struct _NonClockInfoArray{
@@ -460,7 +460,7 @@ typedef struct _NonClockInfoArray{
//sizeof(ATOM_PPLIB_NONCLOCK_INFO)
UCHAR ucEntrySize;
- ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[1];
+ ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[] __counted_by(ucNumEntries);
}NonClockInfoArray;
typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Record
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index bb1f0a3371ab..10793a433bf5 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -923,8 +923,12 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
max_device = ATOM_MAX_SUPPORTED_DEVICE_INFO;
for (i = 0; i < max_device; i++) {
- ATOM_CONNECTOR_INFO_I2C ci =
- supported_devices->info.asConnInfo[i];
+ ATOM_CONNECTOR_INFO_I2C ci;
+
+ if (frev > 1)
+ ci = supported_devices->info_2d1.asConnInfo[i];
+ else
+ ci = supported_devices->info.asConnInfo[i];
bios_connectors[i].valid = false;
diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
index 112438d965ff..6e1fd6985ffc 100644
--- a/drivers/gpu/drm/ttm/ttm_pool.c
+++ b/drivers/gpu/drm/ttm/ttm_pool.c
@@ -288,17 +288,23 @@ static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool,
enum ttm_caching caching,
unsigned int order)
{
- if (pool->use_dma_alloc || pool->nid != NUMA_NO_NODE)
+ if (pool->use_dma_alloc)
return &pool->caching[caching].orders[order];
#ifdef CONFIG_X86
switch (caching) {
case ttm_write_combined:
+ if (pool->nid != NUMA_NO_NODE)
+ return &pool->caching[caching].orders[order];
+
if (pool->use_dma32)
return &global_dma32_write_combined[order];
return &global_write_combined[order];
case ttm_uncached:
+ if (pool->nid != NUMA_NO_NODE)
+ return &pool->caching[caching].orders[order];
+
if (pool->use_dma32)
return &global_dma32_uncached[order];
@@ -566,11 +572,17 @@ void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
pool->use_dma_alloc = use_dma_alloc;
pool->use_dma32 = use_dma32;
- if (use_dma_alloc || nid != NUMA_NO_NODE) {
- for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
- for (j = 0; j < NR_PAGE_ORDERS; ++j)
- ttm_pool_type_init(&pool->caching[i].orders[j],
- pool, i, j);
+ for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
+ for (j = 0; j < NR_PAGE_ORDERS; ++j) {
+ struct ttm_pool_type *pt;
+
+ /* Initialize only pool types which are actually used */
+ pt = ttm_pool_select_type(pool, i, j);
+ if (pt != &pool->caching[i].orders[j])
+ continue;
+
+ ttm_pool_type_init(pt, pool, i, j);
+ }
}
}
EXPORT_SYMBOL(ttm_pool_init);
@@ -599,10 +611,16 @@ void ttm_pool_fini(struct ttm_pool *pool)
{
unsigned int i, j;
- if (pool->use_dma_alloc || pool->nid != NUMA_NO_NODE) {
- for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
- for (j = 0; j < NR_PAGE_ORDERS; ++j)
- ttm_pool_type_fini(&pool->caching[i].orders[j]);
+ for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
+ for (j = 0; j < NR_PAGE_ORDERS; ++j) {
+ struct ttm_pool_type *pt;
+
+ pt = ttm_pool_select_type(pool, i, j);
+ if (pt != &pool->caching[i].orders[j])
+ continue;
+
+ ttm_pool_type_fini(pt);
+ }
}
/* We removed the pool types from the LRU, but we need to also make sure
diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c
index 2e04f6cb661e..ce6b2fb341d1 100644
--- a/drivers/gpu/drm/v3d/v3d_irq.c
+++ b/drivers/gpu/drm/v3d/v3d_irq.c
@@ -105,7 +105,6 @@ v3d_irq(int irq, void *arg)
struct v3d_file_priv *file = v3d->bin_job->base.file->driver_priv;
u64 runtime = local_clock() - file->start_ns[V3D_BIN];
- file->enabled_ns[V3D_BIN] += local_clock() - file->start_ns[V3D_BIN];
file->jobs_sent[V3D_BIN]++;
v3d->queue[V3D_BIN].jobs_sent++;
@@ -126,7 +125,6 @@ v3d_irq(int irq, void *arg)
struct v3d_file_priv *file = v3d->render_job->base.file->driver_priv;
u64 runtime = local_clock() - file->start_ns[V3D_RENDER];
- file->enabled_ns[V3D_RENDER] += local_clock() - file->start_ns[V3D_RENDER];
file->jobs_sent[V3D_RENDER]++;
v3d->queue[V3D_RENDER].jobs_sent++;
@@ -147,7 +145,6 @@ v3d_irq(int irq, void *arg)
struct v3d_file_priv *file = v3d->csd_job->base.file->driver_priv;
u64 runtime = local_clock() - file->start_ns[V3D_CSD];
- file->enabled_ns[V3D_CSD] += local_clock() - file->start_ns[V3D_CSD];
file->jobs_sent[V3D_CSD]++;
v3d->queue[V3D_CSD].jobs_sent++;
@@ -195,7 +192,6 @@ v3d_hub_irq(int irq, void *arg)
struct v3d_file_priv *file = v3d->tfu_job->base.file->driver_priv;
u64 runtime = local_clock() - file->start_ns[V3D_TFU];
- file->enabled_ns[V3D_TFU] += local_clock() - file->start_ns[V3D_TFU];
file->jobs_sent[V3D_TFU]++;
v3d->queue[V3D_TFU].jobs_sent++;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
index c52c7bf1485b..717d624e9a05 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
@@ -456,8 +456,10 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
.no_wait_gpu = false
};
u32 j, initial_line = dst_offset / dst_stride;
- struct vmw_bo_blit_line_data d;
+ struct vmw_bo_blit_line_data d = {0};
int ret = 0;
+ struct page **dst_pages = NULL;
+ struct page **src_pages = NULL;
/* Buffer objects need to be either pinned or reserved: */
if (!(dst->pin_count))
@@ -477,12 +479,35 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
return ret;
}
+ if (!src->ttm->pages && src->ttm->sg) {
+ src_pages = kvmalloc_array(src->ttm->num_pages,
+ sizeof(struct page *), GFP_KERNEL);
+ if (!src_pages)
+ return -ENOMEM;
+ ret = drm_prime_sg_to_page_array(src->ttm->sg, src_pages,
+ src->ttm->num_pages);
+ if (ret)
+ goto out;
+ }
+ if (!dst->ttm->pages && dst->ttm->sg) {
+ dst_pages = kvmalloc_array(dst->ttm->num_pages,
+ sizeof(struct page *), GFP_KERNEL);
+ if (!dst_pages) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ret = drm_prime_sg_to_page_array(dst->ttm->sg, dst_pages,
+ dst->ttm->num_pages);
+ if (ret)
+ goto out;
+ }
+
d.mapped_dst = 0;
d.mapped_src = 0;
d.dst_addr = NULL;
d.src_addr = NULL;
- d.dst_pages = dst->ttm->pages;
- d.src_pages = src->ttm->pages;
+ d.dst_pages = dst->ttm->pages ? dst->ttm->pages : dst_pages;
+ d.src_pages = src->ttm->pages ? src->ttm->pages : src_pages;
d.dst_num_pages = PFN_UP(dst->resource->size);
d.src_num_pages = PFN_UP(src->resource->size);
d.dst_prot = ttm_io_prot(dst, dst->resource, PAGE_KERNEL);
@@ -504,6 +529,10 @@ out:
kunmap_atomic(d.src_addr);
if (d.dst_addr)
kunmap_atomic(d.dst_addr);
+ if (src_pages)
+ kvfree(src_pages);
+ if (dst_pages)
+ kvfree(dst_pages);
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index bfd41ce3c8f4..e5eb21a471a6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -377,7 +377,8 @@ static int vmw_bo_init(struct vmw_private *dev_priv,
{
struct ttm_operation_ctx ctx = {
.interruptible = params->bo_type != ttm_bo_type_kernel,
- .no_wait_gpu = false
+ .no_wait_gpu = false,
+ .resv = params->resv,
};
struct ttm_device *bdev = &dev_priv->bdev;
struct drm_device *vdev = &dev_priv->drm;
@@ -394,8 +395,8 @@ static int vmw_bo_init(struct vmw_private *dev_priv,
vmw_bo_placement_set(vmw_bo, params->domain, params->busy_domain);
ret = ttm_bo_init_reserved(bdev, &vmw_bo->tbo, params->bo_type,
- &vmw_bo->placement, 0, &ctx, NULL,
- NULL, destroy);
+ &vmw_bo->placement, 0, &ctx,
+ params->sg, params->resv, destroy);
if (unlikely(ret))
return ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
index 0d496dc9c6af..f349642e6190 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
@@ -55,6 +55,8 @@ struct vmw_bo_params {
enum ttm_bo_type bo_type;
size_t size;
bool pin;
+ struct dma_resv *resv;
+ struct sg_table *sg;
};
/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 0a304706e013..58fb40c93100 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -1628,6 +1628,7 @@ static const struct drm_driver driver = {
.prime_fd_to_handle = vmw_prime_fd_to_handle,
.prime_handle_to_fd = vmw_prime_handle_to_fd,
+ .gem_prime_import_sg_table = vmw_prime_import_sg_table,
.fops = &vmwgfx_driver_fops,
.name = VMWGFX_DRIVER_NAME,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 12efecc17df6..b019a1a1787a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -1130,6 +1130,9 @@ extern int vmw_prime_handle_to_fd(struct drm_device *dev,
struct drm_file *file_priv,
uint32_t handle, uint32_t flags,
int *prime_fd);
+struct drm_gem_object *vmw_prime_import_sg_table(struct drm_device *dev,
+ struct dma_buf_attachment *attach,
+ struct sg_table *table);
/*
* MemoryOBject management - vmwgfx_mob.c
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
index 12787bb9c111..d6bcaf078b1f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
@@ -149,6 +149,38 @@ out_no_bo:
return ret;
}
+struct drm_gem_object *vmw_prime_import_sg_table(struct drm_device *dev,
+ struct dma_buf_attachment *attach,
+ struct sg_table *table)
+{
+ int ret;
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct drm_gem_object *gem = NULL;
+ struct vmw_bo *vbo;
+ struct vmw_bo_params params = {
+ .domain = (dev_priv->has_mob) ? VMW_BO_DOMAIN_SYS : VMW_BO_DOMAIN_VRAM,
+ .busy_domain = VMW_BO_DOMAIN_SYS,
+ .bo_type = ttm_bo_type_sg,
+ .size = attach->dmabuf->size,
+ .pin = false,
+ .resv = attach->dmabuf->resv,
+ .sg = table,
+
+ };
+
+ dma_resv_lock(params.resv, NULL);
+
+ ret = vmw_bo_create(dev_priv, &params, &vbo);
+ if (ret != 0)
+ goto out_no_bo;
+
+ vbo->tbo.base.funcs = &vmw_gem_object_funcs;
+
+ gem = &vbo->tbo.base;
+out_no_bo:
+ dma_resv_unlock(params.resv);
+ return gem;
+}
int vmw_gem_object_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index cd4925346ed4..84ae4e10a2eb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -933,6 +933,7 @@ int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
+ struct vmw_private *vmw = vmw_priv(crtc->dev);
struct drm_crtc_state *new_state = drm_atomic_get_new_crtc_state(state,
crtc);
struct vmw_display_unit *du = vmw_crtc_to_du(new_state->crtc);
@@ -940,9 +941,13 @@ int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
bool has_primary = new_state->plane_mask &
drm_plane_mask(crtc->primary);
- /* We always want to have an active plane with an active CRTC */
- if (has_primary != new_state->enable)
- return -EINVAL;
+ /*
+ * This is fine in general, but broken userspace might expect
+	 * some actual rendering, so give a clue as to why it's blank.
+ */
+ if (new_state->enable && !has_primary)
+ drm_dbg_driver(&vmw->drm,
+ "CRTC without a primary plane will be blank.\n");
if (new_state->connector_mask != connector_mask &&
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index a94947b588e8..19a843da87b7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -243,10 +243,10 @@ struct vmw_framebuffer_bo {
static const uint32_t __maybe_unused vmw_primary_plane_formats[] = {
- DRM_FORMAT_XRGB1555,
- DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB1555,
};
static const uint32_t __maybe_unused vmw_cursor_plane_formats[] = {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c b/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
index 2d72a5ee7c0c..c99cad444991 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
@@ -75,8 +75,12 @@ int vmw_prime_fd_to_handle(struct drm_device *dev,
int fd, u32 *handle)
{
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ int ret = ttm_prime_fd_to_handle(tfile, fd, handle);
- return ttm_prime_fd_to_handle(tfile, fd, handle);
+ if (ret)
+ ret = drm_gem_prime_fd_to_handle(dev, file_priv, fd, handle);
+
+ return ret;
}
int vmw_prime_handle_to_fd(struct drm_device *dev,
@@ -85,5 +89,12 @@ int vmw_prime_handle_to_fd(struct drm_device *dev,
int *prime_fd)
{
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- return ttm_prime_handle_to_fd(tfile, handle, flags, prime_fd);
+ int ret;
+
+ if (handle > VMWGFX_NUM_MOB)
+ ret = ttm_prime_handle_to_fd(tfile, handle, flags, prime_fd);
+ else
+ ret = drm_gem_prime_handle_to_fd(dev, file_priv, handle, flags, prime_fd);
+
+ return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
index 4d23d0a70bcb..621d98b376bb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
@@ -188,13 +188,18 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
switch (dev_priv->map_mode) {
case vmw_dma_map_bind:
case vmw_dma_map_populate:
- vsgt->sgt = &vmw_tt->sgt;
- ret = sg_alloc_table_from_pages_segment(
- &vmw_tt->sgt, vsgt->pages, vsgt->num_pages, 0,
- (unsigned long)vsgt->num_pages << PAGE_SHIFT,
- dma_get_max_seg_size(dev_priv->drm.dev), GFP_KERNEL);
- if (ret)
- goto out_sg_alloc_fail;
+ if (vmw_tt->dma_ttm.page_flags & TTM_TT_FLAG_EXTERNAL) {
+ vsgt->sgt = vmw_tt->dma_ttm.sg;
+ } else {
+ vsgt->sgt = &vmw_tt->sgt;
+ ret = sg_alloc_table_from_pages_segment(&vmw_tt->sgt,
+ vsgt->pages, vsgt->num_pages, 0,
+ (unsigned long)vsgt->num_pages << PAGE_SHIFT,
+ dma_get_max_seg_size(dev_priv->drm.dev),
+ GFP_KERNEL);
+ if (ret)
+ goto out_sg_alloc_fail;
+ }
ret = vmw_ttm_map_for_dma(vmw_tt);
if (unlikely(ret != 0))
@@ -209,8 +214,9 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
return 0;
out_map_fail:
- sg_free_table(vmw_tt->vsgt.sgt);
- vmw_tt->vsgt.sgt = NULL;
+ drm_warn(&dev_priv->drm, "VSG table map failed!");
+ sg_free_table(vsgt->sgt);
+ vsgt->sgt = NULL;
out_sg_alloc_fail:
return ret;
}
@@ -356,15 +362,17 @@ static void vmw_ttm_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
static int vmw_ttm_populate(struct ttm_device *bdev,
struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
- int ret;
+ bool external = (ttm->page_flags & TTM_TT_FLAG_EXTERNAL) != 0;
- /* TODO: maybe completely drop this ? */
if (ttm_tt_is_populated(ttm))
return 0;
- ret = ttm_pool_alloc(&bdev->pool, ttm, ctx);
+ if (external && ttm->sg)
+ return drm_prime_sg_to_dma_addr_array(ttm->sg,
+ ttm->dma_address,
+ ttm->num_pages);
- return ret;
+ return ttm_pool_alloc(&bdev->pool, ttm, ctx);
}
static void vmw_ttm_unpopulate(struct ttm_device *bdev,
@@ -372,6 +380,10 @@ static void vmw_ttm_unpopulate(struct ttm_device *bdev,
{
struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
dma_ttm);
+ bool external = (ttm->page_flags & TTM_TT_FLAG_EXTERNAL) != 0;
+
+ if (external)
+ return;
vmw_ttm_unbind(bdev, ttm);
@@ -390,6 +402,7 @@ static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
{
struct vmw_ttm_tt *vmw_be;
int ret;
+ bool external = bo->type == ttm_bo_type_sg;
vmw_be = kzalloc(sizeof(*vmw_be), GFP_KERNEL);
if (!vmw_be)
@@ -398,7 +411,10 @@ static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
vmw_be->dev_priv = vmw_priv_from_ttm(bo->bdev);
vmw_be->mob = NULL;
- if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
+ if (external)
+ page_flags |= TTM_TT_FLAG_EXTERNAL | TTM_TT_FLAG_EXTERNAL_MAPPABLE;
+
+ if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent || external)
ret = ttm_sg_tt_init(&vmw_be->dma_ttm, bo, page_flags,
ttm_cached);
else
diff --git a/drivers/gpu/drm/xe/display/intel_fb_bo.c b/drivers/gpu/drm/xe/display/intel_fb_bo.c
index b21da7b745a5..a9c1f9885c6b 100644
--- a/drivers/gpu/drm/xe/display/intel_fb_bo.c
+++ b/drivers/gpu/drm/xe/display/intel_fb_bo.c
@@ -31,7 +31,7 @@ int intel_fb_bo_framebuffer_init(struct intel_framebuffer *intel_fb,
ret = ttm_bo_reserve(&bo->ttm, true, false, NULL);
if (ret)
- return ret;
+ goto err;
if (!(bo->flags & XE_BO_SCANOUT_BIT)) {
/*
@@ -42,12 +42,16 @@ int intel_fb_bo_framebuffer_init(struct intel_framebuffer *intel_fb,
*/
if (XE_IOCTL_DBG(i915, !list_empty(&bo->ttm.base.gpuva.list))) {
ttm_bo_unreserve(&bo->ttm);
- return -EINVAL;
+ ret = -EINVAL;
+ goto err;
}
bo->flags |= XE_BO_SCANOUT_BIT;
}
ttm_bo_unreserve(&bo->ttm);
+ return 0;
+err:
+ xe_bo_put(bo);
return ret;
}
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 62d1ef8867a8..3d4c8f342e21 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -1577,6 +1577,16 @@ void xe_vm_close_and_put(struct xe_vm *vm)
xe->usm.num_vm_in_fault_mode--;
else if (!(vm->flags & XE_VM_FLAG_MIGRATION))
xe->usm.num_vm_in_non_fault_mode--;
+
+ if (vm->usm.asid) {
+ void *lookup;
+
+ xe_assert(xe, xe->info.has_asid);
+ xe_assert(xe, !(vm->flags & XE_VM_FLAG_MIGRATION));
+
+ lookup = xa_erase(&xe->usm.asid_to_vm, vm->usm.asid);
+ xe_assert(xe, lookup == vm);
+ }
mutex_unlock(&xe->usm.lock);
for_each_tile(tile, xe, id)
@@ -1592,24 +1602,15 @@ static void vm_destroy_work_func(struct work_struct *w)
struct xe_device *xe = vm->xe;
struct xe_tile *tile;
u8 id;
- void *lookup;
/* xe_vm_close_and_put was not called? */
xe_assert(xe, !vm->size);
mutex_destroy(&vm->snap_mutex);
- if (!(vm->flags & XE_VM_FLAG_MIGRATION)) {
+ if (!(vm->flags & XE_VM_FLAG_MIGRATION))
xe_device_mem_access_put(xe);
- if (xe->info.has_asid && vm->usm.asid) {
- mutex_lock(&xe->usm.lock);
- lookup = xa_erase(&xe->usm.asid_to_vm, vm->usm.asid);
- xe_assert(xe, lookup == vm);
- mutex_unlock(&xe->usm.lock);
- }
- }
-
for_each_tile(tile, xe, id)
XE_WARN_ON(vm->pt_root[id]);
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
index e6a8b6d8eab7..3c3c497b6b91 100644
--- a/drivers/hid/hid-logitech-dj.c
+++ b/drivers/hid/hid-logitech-dj.c
@@ -965,9 +965,7 @@ static void logi_hidpp_dev_conn_notif_equad(struct hid_device *hdev,
}
break;
case REPORT_TYPE_MOUSE:
- workitem->reports_supported |= STD_MOUSE | HIDPP;
- if (djrcv_dev->type == recvr_type_mouse_only)
- workitem->reports_supported |= MULTIMEDIA;
+ workitem->reports_supported |= STD_MOUSE | HIDPP | MULTIMEDIA;
break;
}
}
diff --git a/drivers/hid/hid-mcp2221.c b/drivers/hid/hid-mcp2221.c
index f9cceaeffd08..da5ea5a23b08 100644
--- a/drivers/hid/hid-mcp2221.c
+++ b/drivers/hid/hid-mcp2221.c
@@ -944,9 +944,11 @@ static void mcp2221_hid_unregister(void *ptr)
/* This is needed to be sure hid_hw_stop() isn't called twice by the subsystem */
static void mcp2221_remove(struct hid_device *hdev)
{
+#if IS_REACHABLE(CONFIG_IIO)
struct mcp2221 *mcp = hid_get_drvdata(hdev);
cancel_delayed_work_sync(&mcp->init_work);
+#endif
}
#if IS_REACHABLE(CONFIG_IIO)
diff --git a/drivers/hid/hid-nintendo.c b/drivers/hid/hid-nintendo.c
index ab5953fc2436..80e0f23c1c33 100644
--- a/drivers/hid/hid-nintendo.c
+++ b/drivers/hid/hid-nintendo.c
@@ -481,10 +481,10 @@ static const struct joycon_ctlr_button_mapping n64con_button_mappings[] = {
{ BTN_TR, JC_BTN_R, },
{ BTN_TR2, JC_BTN_LSTICK, }, /* ZR */
{ BTN_START, JC_BTN_PLUS, },
- { BTN_FORWARD, JC_BTN_Y, }, /* C UP */
- { BTN_BACK, JC_BTN_ZR, }, /* C DOWN */
- { BTN_LEFT, JC_BTN_X, }, /* C LEFT */
- { BTN_RIGHT, JC_BTN_MINUS, }, /* C RIGHT */
+ { BTN_SELECT, JC_BTN_Y, }, /* C UP */
+ { BTN_X, JC_BTN_ZR, }, /* C DOWN */
+ { BTN_Y, JC_BTN_X, }, /* C LEFT */
+ { BTN_C, JC_BTN_MINUS, }, /* C RIGHT */
{ BTN_MODE, JC_BTN_HOME, },
{ BTN_Z, JC_BTN_CAP, },
{ /* sentinel */ },
diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
index 2df1ab3c31cc..d965382196c6 100644
--- a/drivers/hid/i2c-hid/i2c-hid-core.c
+++ b/drivers/hid/i2c-hid/i2c-hid-core.c
@@ -64,7 +64,6 @@
/* flags */
#define I2C_HID_STARTED 0
#define I2C_HID_RESET_PENDING 1
-#define I2C_HID_READ_PENDING 2
#define I2C_HID_PWR_ON 0x00
#define I2C_HID_PWR_SLEEP 0x01
@@ -190,15 +189,10 @@ static int i2c_hid_xfer(struct i2c_hid *ihid,
msgs[n].len = recv_len;
msgs[n].buf = recv_buf;
n++;
-
- set_bit(I2C_HID_READ_PENDING, &ihid->flags);
}
ret = i2c_transfer(client->adapter, msgs, n);
- if (recv_len)
- clear_bit(I2C_HID_READ_PENDING, &ihid->flags);
-
if (ret != n)
return ret < 0 ? ret : -EIO;
@@ -556,9 +550,6 @@ static irqreturn_t i2c_hid_irq(int irq, void *dev_id)
{
struct i2c_hid *ihid = dev_id;
- if (test_bit(I2C_HID_READ_PENDING, &ihid->flags))
- return IRQ_HANDLED;
-
i2c_hid_get_input(ihid);
return IRQ_HANDLED;
@@ -735,12 +726,15 @@ static int i2c_hid_parse(struct hid_device *hid)
mutex_lock(&ihid->reset_lock);
do {
ret = i2c_hid_start_hwreset(ihid);
- if (ret)
+ if (ret == 0)
+ ret = i2c_hid_finish_hwreset(ihid);
+ else
msleep(1000);
} while (tries-- > 0 && ret);
+ mutex_unlock(&ihid->reset_lock);
if (ret)
- goto abort_reset;
+ return ret;
use_override = i2c_hid_get_dmi_hid_report_desc_override(client->name,
&rsize);
@@ -750,11 +744,8 @@ static int i2c_hid_parse(struct hid_device *hid)
i2c_hid_dbg(ihid, "Using a HID report descriptor override\n");
} else {
rdesc = kzalloc(rsize, GFP_KERNEL);
-
- if (!rdesc) {
- ret = -ENOMEM;
- goto abort_reset;
- }
+ if (!rdesc)
+ return -ENOMEM;
i2c_hid_dbg(ihid, "asking HID report descriptor\n");
@@ -763,23 +754,10 @@ static int i2c_hid_parse(struct hid_device *hid)
rdesc, rsize);
if (ret) {
hid_err(hid, "reading report descriptor failed\n");
- goto abort_reset;
+ goto out;
}
}
- /*
- * Windows directly reads the report-descriptor after sending reset
- * and then waits for resets completion afterwards. Some touchpads
- * actually wait for the report-descriptor to be read before signalling
- * reset completion.
- */
- ret = i2c_hid_finish_hwreset(ihid);
-abort_reset:
- clear_bit(I2C_HID_RESET_PENDING, &ihid->flags);
- mutex_unlock(&ihid->reset_lock);
- if (ret)
- goto out;
-
i2c_hid_dbg(ihid, "Report Descriptor: %*ph\n", rsize, rdesc);
ret = hid_parse_report(hid, rdesc, rsize);
diff --git a/drivers/hid/intel-ish-hid/ipc/ipc.c b/drivers/hid/intel-ish-hid/ipc/ipc.c
index a49c6affd7c4..dd5fc60874ba 100644
--- a/drivers/hid/intel-ish-hid/ipc/ipc.c
+++ b/drivers/hid/intel-ish-hid/ipc/ipc.c
@@ -948,6 +948,7 @@ struct ishtp_device *ish_dev_init(struct pci_dev *pdev)
if (!dev)
return NULL;
+ dev->devc = &pdev->dev;
ishtp_device_init(dev);
init_waitqueue_head(&dev->wait_hw_ready);
@@ -983,7 +984,6 @@ struct ishtp_device *ish_dev_init(struct pci_dev *pdev)
}
dev->ops = &ish_hw_ops;
- dev->devc = &pdev->dev;
dev->mtu = IPC_PAYLOAD_SIZE - sizeof(struct ishtp_msg_hdr);
return dev;
}
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index bf0df6ee4f78..07fb8d3c037f 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -1026,23 +1026,26 @@ static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
}
}
-static noinline void cm_destroy_id_wait_timeout(struct ib_cm_id *cm_id)
+static noinline void cm_destroy_id_wait_timeout(struct ib_cm_id *cm_id,
+ enum ib_cm_state old_state)
{
struct cm_id_private *cm_id_priv;
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
- pr_err("%s: cm_id=%p timed out. state=%d refcnt=%d\n", __func__,
- cm_id, cm_id->state, refcount_read(&cm_id_priv->refcount));
+ pr_err("%s: cm_id=%p timed out. state %d -> %d, refcnt=%d\n", __func__,
+ cm_id, old_state, cm_id->state, refcount_read(&cm_id_priv->refcount));
}
static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
{
struct cm_id_private *cm_id_priv;
+ enum ib_cm_state old_state;
struct cm_work *work;
int ret;
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
spin_lock_irq(&cm_id_priv->lock);
+ old_state = cm_id->state;
retest:
switch (cm_id->state) {
case IB_CM_LISTEN:
@@ -1151,7 +1154,7 @@ retest:
msecs_to_jiffies(
CM_DESTROY_ID_WAIT_TIMEOUT));
if (!ret) /* timeout happened */
- cm_destroy_id_wait_timeout(cm_id);
+ cm_destroy_id_wait_timeout(cm_id, old_state);
} while (!ret);
while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c
index 0c3c4e64812c..3e43687a7f6f 100644
--- a/drivers/infiniband/hw/mlx5/mad.c
+++ b/drivers/infiniband/hw/mlx5/mad.c
@@ -188,7 +188,8 @@ static int process_pma_cmd(struct mlx5_ib_dev *dev, u32 port_num,
mdev = dev->mdev;
mdev_port_num = 1;
}
- if (MLX5_CAP_GEN(dev->mdev, num_ports) == 1) {
+ if (MLX5_CAP_GEN(dev->mdev, num_ports) == 1 &&
+ !mlx5_core_mp_enabled(mdev)) {
/* set local port to one for Function-Per-Port HCA. */
mdev = dev->mdev;
mdev_port_num = 1;
diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
index ae466e72fc43..255677bc12b2 100644
--- a/drivers/infiniband/sw/rxe/rxe.c
+++ b/drivers/infiniband/sw/rxe/rxe.c
@@ -33,6 +33,8 @@ void rxe_dealloc(struct ib_device *ib_dev)
if (rxe->tfm)
crypto_free_shash(rxe->tfm);
+
+ mutex_destroy(&rxe->usdev_lock);
}
/* initialize rxe device parameters */
diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c
index 5d1010cafed8..7e9b996b47c8 100644
--- a/drivers/interconnect/core.c
+++ b/drivers/interconnect/core.c
@@ -176,6 +176,8 @@ static struct icc_path *path_init(struct device *dev, struct icc_node *dst,
path->num_nodes = num_nodes;
+ mutex_lock(&icc_bw_lock);
+
for (i = num_nodes - 1; i >= 0; i--) {
node->provider->users++;
hlist_add_head(&path->reqs[i].req_node, &node->req_list);
@@ -186,6 +188,8 @@ static struct icc_path *path_init(struct device *dev, struct icc_node *dst,
node = node->reverse;
}
+ mutex_unlock(&icc_bw_lock);
+
return path;
}
@@ -792,12 +796,16 @@ void icc_put(struct icc_path *path)
pr_err("%s: error (%d)\n", __func__, ret);
mutex_lock(&icc_lock);
+ mutex_lock(&icc_bw_lock);
+
for (i = 0; i < path->num_nodes; i++) {
node = path->reqs[i].node;
hlist_del(&path->reqs[i].req_node);
if (!WARN_ON(!node->provider->users))
node->provider->users--;
}
+
+ mutex_unlock(&icc_bw_lock);
mutex_unlock(&icc_lock);
kfree_const(path->name);
diff --git a/drivers/interconnect/qcom/x1e80100.c b/drivers/interconnect/qcom/x1e80100.c
index 99824675ee3f..654abb9ce08e 100644
--- a/drivers/interconnect/qcom/x1e80100.c
+++ b/drivers/interconnect/qcom/x1e80100.c
@@ -116,15 +116,6 @@ static struct qcom_icc_node xm_sdc2 = {
.links = { X1E80100_SLAVE_A2NOC_SNOC },
};
-static struct qcom_icc_node ddr_perf_mode_master = {
- .name = "ddr_perf_mode_master",
- .id = X1E80100_MASTER_DDR_PERF_MODE,
- .channels = 1,
- .buswidth = 4,
- .num_links = 1,
- .links = { X1E80100_SLAVE_DDR_PERF_MODE },
-};
-
static struct qcom_icc_node qup0_core_master = {
.name = "qup0_core_master",
.id = X1E80100_MASTER_QUP_CORE_0,
@@ -688,14 +679,6 @@ static struct qcom_icc_node qns_a2noc_snoc = {
.links = { X1E80100_MASTER_A2NOC_SNOC },
};
-static struct qcom_icc_node ddr_perf_mode_slave = {
- .name = "ddr_perf_mode_slave",
- .id = X1E80100_SLAVE_DDR_PERF_MODE,
- .channels = 1,
- .buswidth = 4,
- .num_links = 0,
-};
-
static struct qcom_icc_node qup0_core_slave = {
.name = "qup0_core_slave",
.id = X1E80100_SLAVE_QUP_CORE_0,
@@ -1377,12 +1360,6 @@ static struct qcom_icc_bcm bcm_acv = {
.nodes = { &ebi },
};
-static struct qcom_icc_bcm bcm_acv_perf = {
- .name = "ACV_PERF",
- .num_nodes = 1,
- .nodes = { &ddr_perf_mode_slave },
-};
-
static struct qcom_icc_bcm bcm_ce0 = {
.name = "CE0",
.num_nodes = 1,
@@ -1583,18 +1560,15 @@ static const struct qcom_icc_desc x1e80100_aggre2_noc = {
};
static struct qcom_icc_bcm * const clk_virt_bcms[] = {
- &bcm_acv_perf,
&bcm_qup0,
&bcm_qup1,
&bcm_qup2,
};
static struct qcom_icc_node * const clk_virt_nodes[] = {
- [MASTER_DDR_PERF_MODE] = &ddr_perf_mode_master,
[MASTER_QUP_CORE_0] = &qup0_core_master,
[MASTER_QUP_CORE_1] = &qup1_core_master,
[MASTER_QUP_CORE_2] = &qup2_core_master,
- [SLAVE_DDR_PERF_MODE] = &ddr_perf_mode_slave,
[SLAVE_QUP_CORE_0] = &qup0_core_slave,
[SLAVE_QUP_CORE_1] = &qup1_core_slave,
[SLAVE_QUP_CORE_2] = &qup2_core_slave,
diff --git a/drivers/iommu/iommufd/Kconfig b/drivers/iommu/iommufd/Kconfig
index 99d4b075df49..76656fe0470d 100644
--- a/drivers/iommu/iommufd/Kconfig
+++ b/drivers/iommu/iommufd/Kconfig
@@ -37,6 +37,7 @@ config IOMMUFD_TEST
depends on DEBUG_KERNEL
depends on FAULT_INJECTION
depends on RUNTIME_TESTING_MENU
+ select IOMMUFD_DRIVER
default n
help
This is dangerous, do not enable unless running
diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c
index 1a64364700eb..0ad2ff9065aa 100644
--- a/drivers/misc/cardreader/rtsx_pcr.c
+++ b/drivers/misc/cardreader/rtsx_pcr.c
@@ -1002,7 +1002,7 @@ static irqreturn_t rtsx_pci_isr(int irq, void *dev_id)
} else {
pcr->card_removed |= SD_EXIST;
pcr->card_inserted &= ~SD_EXIST;
- if (PCI_PID(pcr) == PID_5261) {
+ if ((PCI_PID(pcr) == PID_5261) || (PCI_PID(pcr) == PID_5264)) {
rtsx_pci_write_register(pcr, RTS5261_FW_STATUS,
RTS5261_EXPRESS_LINK_FAIL_MASK, 0);
pcr->extra_caps |= EXTRA_CAPS_SD_EXPRESS;
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index b5757993c9b2..c39718042e2e 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -116,7 +116,7 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
{MEI_PCI_DEVICE(MEI_DEV_ID_ADP_P, MEI_ME_PCH15_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_ADP_N, MEI_ME_PCH15_CFG)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_RPL_S, MEI_ME_PCH15_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_RPL_S, MEI_ME_PCH15_SPS_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_MTL_M, MEI_ME_PCH15_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_ARL_S, MEI_ME_PCH15_CFG)},
diff --git a/drivers/misc/mei/platform-vsc.c b/drivers/misc/mei/platform-vsc.c
index 6c9f00bcb94b..b543e6b9f3cf 100644
--- a/drivers/misc/mei/platform-vsc.c
+++ b/drivers/misc/mei/platform-vsc.c
@@ -400,25 +400,40 @@ static void mei_vsc_remove(struct platform_device *pdev)
static int mei_vsc_suspend(struct device *dev)
{
struct mei_device *mei_dev = dev_get_drvdata(dev);
+ struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
mei_stop(mei_dev);
+ mei_disable_interrupts(mei_dev);
+
+ vsc_tp_free_irq(hw->tp);
+
return 0;
}
static int mei_vsc_resume(struct device *dev)
{
struct mei_device *mei_dev = dev_get_drvdata(dev);
+ struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
int ret;
- ret = mei_restart(mei_dev);
+ ret = vsc_tp_request_irq(hw->tp);
if (ret)
return ret;
+ ret = mei_restart(mei_dev);
+ if (ret)
+ goto err_free;
+
/* start timer if stopped in suspend */
schedule_delayed_work(&mei_dev->timer_work, HZ);
return 0;
+
+err_free:
+ vsc_tp_free_irq(hw->tp);
+
+ return ret;
}
static DEFINE_SIMPLE_DEV_PM_OPS(mei_vsc_pm_ops, mei_vsc_suspend, mei_vsc_resume);
diff --git a/drivers/misc/mei/vsc-tp.c b/drivers/misc/mei/vsc-tp.c
index ecfb70cd057c..e6a98dba8a73 100644
--- a/drivers/misc/mei/vsc-tp.c
+++ b/drivers/misc/mei/vsc-tp.c
@@ -94,6 +94,27 @@ static const struct acpi_gpio_mapping vsc_tp_acpi_gpios[] = {
{}
};
+static irqreturn_t vsc_tp_isr(int irq, void *data)
+{
+ struct vsc_tp *tp = data;
+
+ atomic_inc(&tp->assert_cnt);
+
+ wake_up(&tp->xfer_wait);
+
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t vsc_tp_thread_isr(int irq, void *data)
+{
+ struct vsc_tp *tp = data;
+
+ if (tp->event_notify)
+ tp->event_notify(tp->event_notify_context);
+
+ return IRQ_HANDLED;
+}
+
/* wakeup firmware and wait for response */
static int vsc_tp_wakeup_request(struct vsc_tp *tp)
{
@@ -384,6 +405,37 @@ int vsc_tp_register_event_cb(struct vsc_tp *tp, vsc_tp_event_cb_t event_cb,
EXPORT_SYMBOL_NS_GPL(vsc_tp_register_event_cb, VSC_TP);
/**
+ * vsc_tp_request_irq - request irq for vsc_tp device
+ * @tp: vsc_tp device handle
+ */
+int vsc_tp_request_irq(struct vsc_tp *tp)
+{
+ struct spi_device *spi = tp->spi;
+ struct device *dev = &spi->dev;
+ int ret;
+
+ irq_set_status_flags(spi->irq, IRQ_DISABLE_UNLAZY);
+ ret = request_threaded_irq(spi->irq, vsc_tp_isr, vsc_tp_thread_isr,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ dev_name(dev), tp);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(vsc_tp_request_irq, VSC_TP);
+
+/**
+ * vsc_tp_free_irq - free irq for vsc_tp device
+ * @tp: vsc_tp device handle
+ */
+void vsc_tp_free_irq(struct vsc_tp *tp)
+{
+ free_irq(tp->spi->irq, tp);
+}
+EXPORT_SYMBOL_NS_GPL(vsc_tp_free_irq, VSC_TP);
+
+/**
* vsc_tp_intr_synchronize - synchronize vsc_tp interrupt
* @tp: vsc_tp device handle
*/
@@ -413,27 +465,6 @@ void vsc_tp_intr_disable(struct vsc_tp *tp)
}
EXPORT_SYMBOL_NS_GPL(vsc_tp_intr_disable, VSC_TP);
-static irqreturn_t vsc_tp_isr(int irq, void *data)
-{
- struct vsc_tp *tp = data;
-
- atomic_inc(&tp->assert_cnt);
-
- return IRQ_WAKE_THREAD;
-}
-
-static irqreturn_t vsc_tp_thread_isr(int irq, void *data)
-{
- struct vsc_tp *tp = data;
-
- wake_up(&tp->xfer_wait);
-
- if (tp->event_notify)
- tp->event_notify(tp->event_notify_context);
-
- return IRQ_HANDLED;
-}
-
static int vsc_tp_match_any(struct acpi_device *adev, void *data)
{
struct acpi_device **__adev = data;
@@ -490,10 +521,9 @@ static int vsc_tp_probe(struct spi_device *spi)
tp->spi = spi;
irq_set_status_flags(spi->irq, IRQ_DISABLE_UNLAZY);
- ret = devm_request_threaded_irq(dev, spi->irq, vsc_tp_isr,
- vsc_tp_thread_isr,
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
- dev_name(dev), tp);
+ ret = request_threaded_irq(spi->irq, vsc_tp_isr, vsc_tp_thread_isr,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ dev_name(dev), tp);
if (ret)
return ret;
@@ -522,6 +552,8 @@ static int vsc_tp_probe(struct spi_device *spi)
err_destroy_lock:
mutex_destroy(&tp->mutex);
+ free_irq(spi->irq, tp);
+
return ret;
}
@@ -532,6 +564,8 @@ static void vsc_tp_remove(struct spi_device *spi)
platform_device_unregister(tp->pdev);
mutex_destroy(&tp->mutex);
+
+ free_irq(spi->irq, tp);
}
static const struct acpi_device_id vsc_tp_acpi_ids[] = {
diff --git a/drivers/misc/mei/vsc-tp.h b/drivers/misc/mei/vsc-tp.h
index f9513ddc3e40..14ca195cbddc 100644
--- a/drivers/misc/mei/vsc-tp.h
+++ b/drivers/misc/mei/vsc-tp.h
@@ -37,6 +37,9 @@ int vsc_tp_xfer(struct vsc_tp *tp, u8 cmd, const void *obuf, size_t olen,
int vsc_tp_register_event_cb(struct vsc_tp *tp, vsc_tp_event_cb_t event_cb,
void *context);
+int vsc_tp_request_irq(struct vsc_tp *tp);
+void vsc_tp_free_irq(struct vsc_tp *tp);
+
void vsc_tp_intr_enable(struct vsc_tp *tp);
void vsc_tp_intr_disable(struct vsc_tp *tp);
void vsc_tp_intr_synchronize(struct vsc_tp *tp);
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 3730ec1ea135..f29ef72a2f1d 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -566,13 +566,61 @@ static void mv88e6xxx_translate_cmode(u8 cmode, unsigned long *supported)
phy_interface_set_rgmii(supported);
}
-static void mv88e6250_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
- struct phylink_config *config)
+static void
+mv88e6250_setup_supported_interfaces(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_config *config)
{
unsigned long *supported = config->supported_interfaces;
+ int err;
+ u16 reg;
- /* Translate the default cmode */
- mv88e6xxx_translate_cmode(chip->ports[port].cmode, supported);
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &reg);
+ if (err) {
+ dev_err(chip->dev, "p%d: failed to read port status\n", port);
+ return;
+ }
+
+ switch (reg & MV88E6250_PORT_STS_PORTMODE_MASK) {
+ case MV88E6250_PORT_STS_PORTMODE_MII_10_HALF_PHY:
+ case MV88E6250_PORT_STS_PORTMODE_MII_100_HALF_PHY:
+ case MV88E6250_PORT_STS_PORTMODE_MII_10_FULL_PHY:
+ case MV88E6250_PORT_STS_PORTMODE_MII_100_FULL_PHY:
+ __set_bit(PHY_INTERFACE_MODE_REVMII, supported);
+ break;
+
+ case MV88E6250_PORT_STS_PORTMODE_MII_HALF:
+ case MV88E6250_PORT_STS_PORTMODE_MII_FULL:
+ __set_bit(PHY_INTERFACE_MODE_MII, supported);
+ break;
+
+ case MV88E6250_PORT_STS_PORTMODE_MII_DUAL_100_RMII_FULL_PHY:
+ case MV88E6250_PORT_STS_PORTMODE_MII_200_RMII_FULL_PHY:
+ case MV88E6250_PORT_STS_PORTMODE_MII_10_100_RMII_HALF_PHY:
+ case MV88E6250_PORT_STS_PORTMODE_MII_10_100_RMII_FULL_PHY:
+ __set_bit(PHY_INTERFACE_MODE_REVRMII, supported);
+ break;
+
+ case MV88E6250_PORT_STS_PORTMODE_MII_DUAL_100_RMII_FULL:
+ case MV88E6250_PORT_STS_PORTMODE_MII_10_100_RMII_FULL:
+ __set_bit(PHY_INTERFACE_MODE_RMII, supported);
+ break;
+
+ case MV88E6250_PORT_STS_PORTMODE_MII_100_RGMII:
+ __set_bit(PHY_INTERFACE_MODE_RGMII, supported);
+ break;
+
+ default:
+ dev_err(chip->dev,
+ "p%d: invalid port mode in status register: %04x\n",
+ port, reg);
+ }
+}
+
+static void mv88e6250_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_config *config)
+{
+ if (!mv88e6xxx_phy_is_internal(chip, port))
+ mv88e6250_setup_supported_interfaces(chip, port, config);
config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100;
}
diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h
index 86deeb347cbc..ddadeb9bfdae 100644
--- a/drivers/net/dsa/mv88e6xxx/port.h
+++ b/drivers/net/dsa/mv88e6xxx/port.h
@@ -25,10 +25,25 @@
#define MV88E6250_PORT_STS_PORTMODE_PHY_100_HALF 0x0900
#define MV88E6250_PORT_STS_PORTMODE_PHY_10_FULL 0x0a00
#define MV88E6250_PORT_STS_PORTMODE_PHY_100_FULL 0x0b00
-#define MV88E6250_PORT_STS_PORTMODE_MII_10_HALF 0x0c00
-#define MV88E6250_PORT_STS_PORTMODE_MII_100_HALF 0x0d00
-#define MV88E6250_PORT_STS_PORTMODE_MII_10_FULL 0x0e00
-#define MV88E6250_PORT_STS_PORTMODE_MII_100_FULL 0x0f00
+/* - Modes with PHY suffix use output instead of input clock
+ * - Modes without RMII or RGMII use MII
+ * - Modes without speed do not have a fixed speed specified in the manual
+ * ("DC to x MHz" - variable clock support?)
+ */
+#define MV88E6250_PORT_STS_PORTMODE_MII_DISABLED 0x0000
+#define MV88E6250_PORT_STS_PORTMODE_MII_100_RGMII 0x0100
+#define MV88E6250_PORT_STS_PORTMODE_MII_DUAL_100_RMII_FULL_PHY 0x0200
+#define MV88E6250_PORT_STS_PORTMODE_MII_200_RMII_FULL_PHY 0x0400
+#define MV88E6250_PORT_STS_PORTMODE_MII_DUAL_100_RMII_FULL 0x0600
+#define MV88E6250_PORT_STS_PORTMODE_MII_10_100_RMII_FULL 0x0700
+#define MV88E6250_PORT_STS_PORTMODE_MII_HALF 0x0800
+#define MV88E6250_PORT_STS_PORTMODE_MII_10_100_RMII_HALF_PHY 0x0900
+#define MV88E6250_PORT_STS_PORTMODE_MII_FULL 0x0a00
+#define MV88E6250_PORT_STS_PORTMODE_MII_10_100_RMII_FULL_PHY 0x0b00
+#define MV88E6250_PORT_STS_PORTMODE_MII_10_HALF_PHY 0x0c00
+#define MV88E6250_PORT_STS_PORTMODE_MII_100_HALF_PHY 0x0d00
+#define MV88E6250_PORT_STS_PORTMODE_MII_10_FULL_PHY 0x0e00
+#define MV88E6250_PORT_STS_PORTMODE_MII_100_FULL_PHY 0x0f00
#define MV88E6XXX_PORT_STS_LINK 0x0800
#define MV88E6XXX_PORT_STS_DUPLEX 0x0400
#define MV88E6XXX_PORT_STS_SPEED_MASK 0x0300
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
index 72ea97c5d5d4..82768b0e9026 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
@@ -436,10 +436,8 @@ static void umac_init(struct bcmasp_intf *intf)
umac_wl(intf, 0x800, UMC_RX_MAX_PKT_SZ);
}
-static int bcmasp_tx_poll(struct napi_struct *napi, int budget)
+static int bcmasp_tx_reclaim(struct bcmasp_intf *intf)
{
- struct bcmasp_intf *intf =
- container_of(napi, struct bcmasp_intf, tx_napi);
struct bcmasp_intf_stats64 *stats = &intf->stats64;
struct device *kdev = &intf->parent->pdev->dev;
unsigned long read, released = 0;
@@ -482,10 +480,16 @@ static int bcmasp_tx_poll(struct napi_struct *napi, int budget)
DESC_RING_COUNT);
}
- /* Ensure all descriptors have been written to DRAM for the hardware
- * to see updated contents.
- */
- wmb();
+ return released;
+}
+
+static int bcmasp_tx_poll(struct napi_struct *napi, int budget)
+{
+ struct bcmasp_intf *intf =
+ container_of(napi, struct bcmasp_intf, tx_napi);
+ int released = 0;
+
+ released = bcmasp_tx_reclaim(intf);
napi_complete(&intf->tx_napi);
@@ -797,6 +801,7 @@ static void bcmasp_init_tx(struct bcmasp_intf *intf)
intf->tx_spb_dma_read = intf->tx_spb_dma_addr;
intf->tx_spb_index = 0;
intf->tx_spb_clean_index = 0;
+ memset(intf->tx_cbs, 0, sizeof(struct bcmasp_tx_cb) * DESC_RING_COUNT);
/* Make sure channels are disabled */
tx_spb_ctrl_wl(intf, 0x0, TX_SPB_CTRL_ENABLE);
@@ -885,6 +890,8 @@ static void bcmasp_netif_deinit(struct net_device *dev)
} while (timeout-- > 0);
tx_spb_dma_wl(intf, 0x0, TX_SPB_DMA_FIFO_CTRL);
+ bcmasp_tx_reclaim(intf);
+
umac_enable_set(intf, UMC_CMD_TX_EN, 0);
phy_stop(dev->phydev);
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 3e4fb3c3e834..1be6d14030bc 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -2009,12 +2009,14 @@ static int b44_set_pauseparam(struct net_device *dev,
bp->flags |= B44_FLAG_TX_PAUSE;
else
bp->flags &= ~B44_FLAG_TX_PAUSE;
- if (bp->flags & B44_FLAG_PAUSE_AUTO) {
- b44_halt(bp);
- b44_init_rings(bp);
- b44_init_hw(bp, B44_FULL_RESET);
- } else {
- __b44_set_flow_ctrl(bp, bp->flags);
+ if (netif_running(dev)) {
+ if (bp->flags & B44_FLAG_PAUSE_AUTO) {
+ b44_halt(bp);
+ b44_init_rings(bp);
+ b44_init_hw(bp, B44_FULL_RESET);
+ } else {
+ __b44_set_flow_ctrl(bp, bp->flags);
+ }
}
spin_unlock_irq(&bp->lock);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index a30df865be7b..be96bb494ae6 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1811,7 +1811,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
if (!skb) {
bnxt_abort_tpa(cpr, idx, agg_bufs);
- cpr->sw_stats.rx.rx_oom_discards += 1;
+ cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1;
return NULL;
}
} else {
@@ -1821,7 +1821,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
new_data = __bnxt_alloc_rx_frag(bp, &new_mapping, GFP_ATOMIC);
if (!new_data) {
bnxt_abort_tpa(cpr, idx, agg_bufs);
- cpr->sw_stats.rx.rx_oom_discards += 1;
+ cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1;
return NULL;
}
@@ -1837,7 +1837,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
if (!skb) {
skb_free_frag(data);
bnxt_abort_tpa(cpr, idx, agg_bufs);
- cpr->sw_stats.rx.rx_oom_discards += 1;
+ cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1;
return NULL;
}
skb_reserve(skb, bp->rx_offset);
@@ -1848,7 +1848,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, idx, agg_bufs, true);
if (!skb) {
/* Page reuse already handled by bnxt_rx_pages(). */
- cpr->sw_stats.rx.rx_oom_discards += 1;
+ cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1;
return NULL;
}
}
@@ -2127,11 +2127,8 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
u32 frag_len = bnxt_rx_agg_pages_xdp(bp, cpr, &xdp,
cp_cons, agg_bufs,
false);
- if (!frag_len) {
- cpr->sw_stats.rx.rx_oom_discards += 1;
- rc = -ENOMEM;
- goto next_rx;
- }
+ if (!frag_len)
+ goto oom_next_rx;
}
xdp_active = true;
}
@@ -2157,9 +2154,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
else
bnxt_xdp_buff_frags_free(rxr, &xdp);
}
- cpr->sw_stats.rx.rx_oom_discards += 1;
- rc = -ENOMEM;
- goto next_rx;
+ goto oom_next_rx;
}
} else {
u32 payload;
@@ -2170,29 +2165,21 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
payload = 0;
skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
payload | len);
- if (!skb) {
- cpr->sw_stats.rx.rx_oom_discards += 1;
- rc = -ENOMEM;
- goto next_rx;
- }
+ if (!skb)
+ goto oom_next_rx;
}
if (agg_bufs) {
if (!xdp_active) {
skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, cp_cons, agg_bufs, false);
- if (!skb) {
- cpr->sw_stats.rx.rx_oom_discards += 1;
- rc = -ENOMEM;
- goto next_rx;
- }
+ if (!skb)
+ goto oom_next_rx;
} else {
skb = bnxt_xdp_build_skb(bp, skb, agg_bufs, rxr->page_pool, &xdp, rxcmp1);
if (!skb) {
/* we should be able to free the old skb here */
bnxt_xdp_buff_frags_free(rxr, &xdp);
- cpr->sw_stats.rx.rx_oom_discards += 1;
- rc = -ENOMEM;
- goto next_rx;
+ goto oom_next_rx;
}
}
}
@@ -2270,6 +2257,11 @@ next_rx_no_prod_no_len:
*raw_cons = tmp_raw_cons;
return rc;
+
+oom_next_rx:
+ cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1;
+ rc = -ENOMEM;
+ goto next_rx;
}
/* In netpoll mode, if we are using a combined completion ring, we need to
@@ -2316,7 +2308,7 @@ static int bnxt_force_rx_discard(struct bnxt *bp,
}
rc = bnxt_rx_pkt(bp, cpr, raw_cons, event);
if (rc && rc != -EBUSY)
- cpr->sw_stats.rx.rx_netpoll_discards += 1;
+ cpr->bnapi->cp_ring.sw_stats.rx.rx_netpoll_discards += 1;
return rc;
}
@@ -9165,7 +9157,7 @@ static void bnxt_try_map_fw_health_reg(struct bnxt *bp)
BNXT_FW_HEALTH_WIN_BASE +
BNXT_GRC_REG_CHIP_NUM);
}
- if (!BNXT_CHIP_P5(bp))
+ if (!BNXT_CHIP_P5_PLUS(bp))
return;
status_loc = BNXT_GRC_REG_STATUS_P5 |
@@ -13267,6 +13259,16 @@ static void bnxt_rx_ring_reset(struct bnxt *bp)
bnxt_rtnl_unlock_sp(bp);
}
+static void bnxt_fw_fatal_close(struct bnxt *bp)
+{
+ bnxt_tx_disable(bp);
+ bnxt_disable_napi(bp);
+ bnxt_disable_int_sync(bp);
+ bnxt_free_irq(bp);
+ bnxt_clear_int_mode(bp);
+ pci_disable_device(bp->pdev);
+}
+
static void bnxt_fw_reset_close(struct bnxt *bp)
{
bnxt_ulp_stop(bp);
@@ -13280,12 +13282,7 @@ static void bnxt_fw_reset_close(struct bnxt *bp)
pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
if (val == 0xffff)
bp->fw_reset_min_dsecs = 0;
- bnxt_tx_disable(bp);
- bnxt_disable_napi(bp);
- bnxt_disable_int_sync(bp);
- bnxt_free_irq(bp);
- bnxt_clear_int_mode(bp);
- pci_disable_device(bp->pdev);
+ bnxt_fw_fatal_close(bp);
}
__bnxt_close_nic(bp, true, false);
bnxt_vf_reps_free(bp);
@@ -15623,6 +15620,7 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct bnxt *bp = netdev_priv(netdev);
+ bool abort = false;
netdev_info(netdev, "PCI I/O error detected\n");
@@ -15631,16 +15629,27 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
bnxt_ulp_stop(bp);
- if (state == pci_channel_io_perm_failure) {
+ if (test_and_set_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
+ netdev_err(bp->dev, "Firmware reset already in progress\n");
+ abort = true;
+ }
+
+ if (abort || state == pci_channel_io_perm_failure) {
rtnl_unlock();
return PCI_ERS_RESULT_DISCONNECT;
}
- if (state == pci_channel_io_frozen)
+	/* Link is not reliable anymore if state is pci_channel_io_frozen,
+ * so we disable bus master to prevent any potential bad DMAs before
+ * freeing kernel memory.
+ */
+ if (state == pci_channel_io_frozen) {
set_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state);
+ bnxt_fw_fatal_close(bp);
+ }
if (netif_running(netdev))
- bnxt_close(netdev);
+ __bnxt_close_nic(bp, true, true);
if (pci_is_enabled(pdev))
pci_disable_device(pdev);
@@ -15728,6 +15737,7 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
}
reset_exit:
+ clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
bnxt_clear_reservations(bp, true);
rtnl_unlock();
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 0bdcdea0be3e..7c035fdf409c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -16103,8 +16103,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
val = FIELD_GET(I40E_PRTGL_SAH_MFS_MASK,
rd32(&pf->hw, I40E_PRTGL_SAH));
if (val < MAX_FRAME_SIZE_DEFAULT)
- dev_warn(&pdev->dev, "MFS for port %x has been set below the default: %x\n",
- pf->hw.port, val);
+ dev_warn(&pdev->dev, "MFS for port %x (%d) has been set below the default (%d)\n",
+ pf->hw.port, val, MAX_FRAME_SIZE_DEFAULT);
/* Add a filter to drop all Flow control frames from any VSI from being
* transmitted. By doing so we stop a malicious VF from sending out
@@ -16644,7 +16644,7 @@ static int __init i40e_init_module(void)
* since we need to be able to guarantee forward progress even under
* memory pressure.
*/
- i40e_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, i40e_driver_name);
+ i40e_wq = alloc_workqueue("%s", 0, 0, i40e_driver_name);
if (!i40e_wq) {
pr_err("%s: Failed to create workqueue\n", i40e_driver_name);
return -ENOMEM;
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 13361a780ece..20b53ebe9a65 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -3503,6 +3503,34 @@ static void iavf_del_all_cloud_filters(struct iavf_adapter *adapter)
}
/**
+ * iavf_is_tc_config_same - Compare the mqprio TC config with the
+ * TC config already configured on this adapter.
+ * @adapter: board private structure
+ * @mqprio_qopt: TC config received from kernel.
+ *
+ * This function compares the TC config received from the kernel
+ * with the config already configured on the adapter.
+ * Return: True if the configuration is the same, false otherwise.
+ * Return: True if configuration is same, false otherwise.
+ **/
+static bool iavf_is_tc_config_same(struct iavf_adapter *adapter,
+ struct tc_mqprio_qopt *mqprio_qopt)
+{
+ struct virtchnl_channel_info *ch = &adapter->ch_config.ch_info[0];
+ int i;
+
+ if (adapter->num_tc != mqprio_qopt->num_tc)
+ return false;
+
+ for (i = 0; i < adapter->num_tc; i++) {
+ if (ch[i].count != mqprio_qopt->count[i] ||
+ ch[i].offset != mqprio_qopt->offset[i])
+ return false;
+ }
+ return true;
+}
+
+/**
* __iavf_setup_tc - configure multiple traffic classes
* @netdev: network interface device structure
* @type_data: tc offload data
@@ -3559,7 +3587,7 @@ static int __iavf_setup_tc(struct net_device *netdev, void *type_data)
if (ret)
return ret;
/* Return if same TC config is requested */
- if (adapter->num_tc == num_tc)
+ if (iavf_is_tc_config_same(adapter, &mqprio_qopt->qopt))
return 0;
adapter->num_tc = num_tc;
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
index c51e2482cad2..dab25d333bd1 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
@@ -856,6 +856,11 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
return 0;
}
+ if (flags & ICE_VF_RESET_LOCK)
+ mutex_lock(&vf->cfg_lock);
+ else
+ lockdep_assert_held(&vf->cfg_lock);
+
lag = pf->lag;
mutex_lock(&pf->lag_mutex);
if (lag && lag->bonded && lag->primary) {
@@ -867,11 +872,6 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
act_prt = ICE_LAG_INVALID_PORT;
}
- if (flags & ICE_VF_RESET_LOCK)
- mutex_lock(&vf->cfg_lock);
- else
- lockdep_assert_held(&vf->cfg_lock);
-
if (ice_is_vf_disabled(vf)) {
vsi = ice_get_vf_vsi(vf);
if (!vsi) {
@@ -956,14 +956,14 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
ice_mbx_clear_malvf(&vf->mbx_info);
out_unlock:
- if (flags & ICE_VF_RESET_LOCK)
- mutex_unlock(&vf->cfg_lock);
-
if (lag && lag->bonded && lag->primary &&
act_prt != ICE_LAG_INVALID_PORT)
ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt);
mutex_unlock(&pf->lag_mutex);
+ if (flags & ICE_VF_RESET_LOCK)
+ mutex_unlock(&vf->cfg_lock);
+
return err;
}
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index 90316dc58630..6bc56c7c181e 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -298,6 +298,7 @@ struct igc_adapter {
/* LEDs */
struct mutex led_mutex;
+ struct igc_led_classdev *leds;
};
void igc_up(struct igc_adapter *adapter);
@@ -723,6 +724,7 @@ void igc_ptp_read(struct igc_adapter *adapter, struct timespec64 *ts);
void igc_ptp_tx_tstamp_event(struct igc_adapter *adapter);
int igc_led_setup(struct igc_adapter *adapter);
+void igc_led_free(struct igc_adapter *adapter);
#define igc_rx_pg_size(_ring) (PAGE_SIZE << igc_rx_pg_order(_ring))
diff --git a/drivers/net/ethernet/intel/igc/igc_leds.c b/drivers/net/ethernet/intel/igc/igc_leds.c
index bf240c5daf86..3929b25b6ae6 100644
--- a/drivers/net/ethernet/intel/igc/igc_leds.c
+++ b/drivers/net/ethernet/intel/igc/igc_leds.c
@@ -236,8 +236,8 @@ static void igc_led_get_name(struct igc_adapter *adapter, int index, char *buf,
pci_dev_id(adapter->pdev), index);
}
-static void igc_setup_ldev(struct igc_led_classdev *ldev,
- struct net_device *netdev, int index)
+static int igc_setup_ldev(struct igc_led_classdev *ldev,
+ struct net_device *netdev, int index)
{
struct igc_adapter *adapter = netdev_priv(netdev);
struct led_classdev *led_cdev = &ldev->led;
@@ -257,24 +257,46 @@ static void igc_setup_ldev(struct igc_led_classdev *ldev,
led_cdev->hw_control_get = igc_led_hw_control_get;
led_cdev->hw_control_get_device = igc_led_hw_control_get_device;
- devm_led_classdev_register(&netdev->dev, led_cdev);
+ return led_classdev_register(&netdev->dev, led_cdev);
}
int igc_led_setup(struct igc_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
- struct device *dev = &netdev->dev;
struct igc_led_classdev *leds;
- int i;
+ int i, err;
mutex_init(&adapter->led_mutex);
- leds = devm_kcalloc(dev, IGC_NUM_LEDS, sizeof(*leds), GFP_KERNEL);
+ leds = kcalloc(IGC_NUM_LEDS, sizeof(*leds), GFP_KERNEL);
if (!leds)
return -ENOMEM;
- for (i = 0; i < IGC_NUM_LEDS; i++)
- igc_setup_ldev(leds + i, netdev, i);
+ for (i = 0; i < IGC_NUM_LEDS; i++) {
+ err = igc_setup_ldev(leds + i, netdev, i);
+ if (err)
+ goto err;
+ }
+
+ adapter->leds = leds;
return 0;
+
+err:
+ for (i--; i >= 0; i--)
+ led_classdev_unregister(&((leds + i)->led));
+
+ kfree(leds);
+ return err;
+}
+
+void igc_led_free(struct igc_adapter *adapter)
+{
+ struct igc_led_classdev *leds = adapter->leds;
+ int i;
+
+ for (i = 0; i < IGC_NUM_LEDS; i++)
+ led_classdev_unregister(&((leds + i)->led));
+
+ kfree(leds);
}
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index d9bd001af7ba..b529fb59f9d5 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -7020,6 +7020,9 @@ static void igc_remove(struct pci_dev *pdev)
cancel_work_sync(&adapter->watchdog_task);
hrtimer_cancel(&adapter->hrtimer);
+ if (IS_ENABLED(CONFIG_IGC_LEDS))
+ igc_led_free(adapter);
+
/* Release control of h/w to f/w. If f/w is AMT enabled, this
* would have already happened in close and is redundant.
*/
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index be709f83f331..e8b73b9d75e3 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -2181,7 +2181,6 @@ void rvu_npc_freemem(struct rvu *rvu)
kfree(pkind->rsrc.bmap);
npc_mcam_rsrcs_deinit(rvu);
- kfree(mcam->counters.bmap);
if (rvu->kpu_prfl_addr)
iounmap(rvu->kpu_prfl_addr);
else
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
index b2cabd6ab86c..cc9bcc420032 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
@@ -1640,6 +1640,7 @@ static const struct macsec_ops macsec_offload_ops = {
.mdo_add_secy = mlx5e_macsec_add_secy,
.mdo_upd_secy = mlx5e_macsec_upd_secy,
.mdo_del_secy = mlx5e_macsec_del_secy,
+ .rx_uses_md_dst = true,
};
bool mlx5e_macsec_handle_tx_skb(struct mlx5e_macsec *macsec, struct sk_buff *skb)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index e4d7739bd7c8..4a79c0d7e7ad 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -849,7 +849,7 @@ free_skb:
static const struct mlxsw_listener mlxsw_emad_rx_listener =
MLXSW_RXL(mlxsw_emad_rx_listener_func, ETHEMAD, TRAP_TO_CPU, false,
- EMAD, DISCARD);
+ EMAD, FORWARD);
static int mlxsw_emad_tlv_enable(struct mlxsw_core *mlxsw_core)
{
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.c b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
index 53b150b7ae4e..6c06b0592760 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_env.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
@@ -1357,24 +1357,20 @@ static struct mlxsw_linecards_event_ops mlxsw_env_event_ops = {
.got_inactive = mlxsw_env_got_inactive,
};
-static int mlxsw_env_max_module_eeprom_len_query(struct mlxsw_env *mlxsw_env)
+static void mlxsw_env_max_module_eeprom_len_query(struct mlxsw_env *mlxsw_env)
{
char mcam_pl[MLXSW_REG_MCAM_LEN];
- bool mcia_128b_supported;
+ bool mcia_128b_supported = false;
int err;
mlxsw_reg_mcam_pack(mcam_pl,
MLXSW_REG_MCAM_FEATURE_GROUP_ENHANCED_FEATURES);
err = mlxsw_reg_query(mlxsw_env->core, MLXSW_REG(mcam), mcam_pl);
- if (err)
- return err;
-
- mlxsw_reg_mcam_unpack(mcam_pl, MLXSW_REG_MCAM_MCIA_128B,
- &mcia_128b_supported);
+ if (!err)
+ mlxsw_reg_mcam_unpack(mcam_pl, MLXSW_REG_MCAM_MCIA_128B,
+ &mcia_128b_supported);
mlxsw_env->max_eeprom_len = mcia_128b_supported ? 128 : 48;
-
- return 0;
}
int mlxsw_env_init(struct mlxsw_core *mlxsw_core,
@@ -1445,15 +1441,11 @@ int mlxsw_env_init(struct mlxsw_core *mlxsw_core,
if (err)
goto err_type_set;
- err = mlxsw_env_max_module_eeprom_len_query(env);
- if (err)
- goto err_eeprom_len_query;
-
+ mlxsw_env_max_module_eeprom_len_query(env);
env->line_cards[0]->active = true;
return 0;
-err_eeprom_len_query:
err_type_set:
mlxsw_env_module_event_disable(env, 0);
err_mlxsw_env_module_event_enable:
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index 4d617057af25..13fd067c39ed 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -1545,7 +1545,7 @@ mlxsw_pci_reset(struct mlxsw_pci *mlxsw_pci, const struct pci_device_id *id)
{
struct pci_dev *pdev = mlxsw_pci->pdev;
char mcam_pl[MLXSW_REG_MCAM_LEN];
- bool pci_reset_supported;
+ bool pci_reset_supported = false;
u32 sys_status;
int err;
@@ -1563,11 +1563,9 @@ mlxsw_pci_reset(struct mlxsw_pci *mlxsw_pci, const struct pci_device_id *id)
mlxsw_reg_mcam_pack(mcam_pl,
MLXSW_REG_MCAM_FEATURE_GROUP_ENHANCED_FEATURES);
err = mlxsw_reg_query(mlxsw_pci->core, MLXSW_REG(mcam), mcam_pl);
- if (err)
- return err;
-
- mlxsw_reg_mcam_unpack(mcam_pl, MLXSW_REG_MCAM_PCI_RESET,
- &pci_reset_supported);
+ if (!err)
+ mlxsw_reg_mcam_unpack(mcam_pl, MLXSW_REG_MCAM_PCI_RESET,
+ &pci_reset_supported);
if (pci_reset_supported) {
pci_dbg(pdev, "Starting PCI reset flow\n");
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
index baedf0d45e85..b1d08e958bf9 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
@@ -10,6 +10,7 @@
#include <linux/netdevice.h>
#include <linux/mutex.h>
#include <linux/refcount.h>
+#include <linux/idr.h>
#include <net/devlink.h>
#include <trace/events/mlxsw.h>
@@ -58,41 +59,43 @@ int mlxsw_sp_acl_tcam_priority_get(struct mlxsw_sp *mlxsw_sp,
static int mlxsw_sp_acl_tcam_region_id_get(struct mlxsw_sp_acl_tcam *tcam,
u16 *p_id)
{
- u16 id;
+ int id;
- id = find_first_zero_bit(tcam->used_regions, tcam->max_regions);
- if (id < tcam->max_regions) {
- __set_bit(id, tcam->used_regions);
- *p_id = id;
- return 0;
- }
- return -ENOBUFS;
+ id = ida_alloc_max(&tcam->used_regions, tcam->max_regions - 1,
+ GFP_KERNEL);
+ if (id < 0)
+ return id;
+
+ *p_id = id;
+
+ return 0;
}
static void mlxsw_sp_acl_tcam_region_id_put(struct mlxsw_sp_acl_tcam *tcam,
u16 id)
{
- __clear_bit(id, tcam->used_regions);
+ ida_free(&tcam->used_regions, id);
}
static int mlxsw_sp_acl_tcam_group_id_get(struct mlxsw_sp_acl_tcam *tcam,
u16 *p_id)
{
- u16 id;
+ int id;
- id = find_first_zero_bit(tcam->used_groups, tcam->max_groups);
- if (id < tcam->max_groups) {
- __set_bit(id, tcam->used_groups);
- *p_id = id;
- return 0;
- }
- return -ENOBUFS;
+ id = ida_alloc_max(&tcam->used_groups, tcam->max_groups - 1,
+ GFP_KERNEL);
+ if (id < 0)
+ return id;
+
+ *p_id = id;
+
+ return 0;
}
static void mlxsw_sp_acl_tcam_group_id_put(struct mlxsw_sp_acl_tcam *tcam,
u16 id)
{
- __clear_bit(id, tcam->used_groups);
+ ida_free(&tcam->used_groups, id);
}
struct mlxsw_sp_acl_tcam_pattern {
@@ -715,7 +718,9 @@ static void mlxsw_sp_acl_tcam_vregion_rehash_work(struct work_struct *work)
rehash.dw.work);
int credits = MLXSW_SP_ACL_TCAM_VREGION_REHASH_CREDITS;
+ mutex_lock(&vregion->lock);
mlxsw_sp_acl_tcam_vregion_rehash(vregion->mlxsw_sp, vregion, &credits);
+ mutex_unlock(&vregion->lock);
if (credits < 0)
/* Rehash gone out of credits so it was interrupted.
* Schedule the work as soon as possible to continue.
@@ -726,6 +731,17 @@ static void mlxsw_sp_acl_tcam_vregion_rehash_work(struct work_struct *work)
}
static void
+mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
+{
+ /* The entry markers are relative to the current chunk and therefore
+ * need to be reset together with the chunk marker.
+ */
+ ctx->current_vchunk = NULL;
+ ctx->start_ventry = NULL;
+ ctx->stop_ventry = NULL;
+}
+
+static void
mlxsw_sp_acl_tcam_rehash_ctx_vchunk_changed(struct mlxsw_sp_acl_tcam_vchunk *vchunk)
{
struct mlxsw_sp_acl_tcam_vregion *vregion = vchunk->vregion;
@@ -747,7 +763,7 @@ mlxsw_sp_acl_tcam_rehash_ctx_vregion_changed(struct mlxsw_sp_acl_tcam_vregion *v
* the current chunk pointer to make sure all chunks
* are properly migrated.
*/
- vregion->rehash.ctx.current_vchunk = NULL;
+ mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(&vregion->rehash.ctx);
}
static struct mlxsw_sp_acl_tcam_vregion *
@@ -820,10 +836,14 @@ mlxsw_sp_acl_tcam_vregion_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam *tcam = vregion->tcam;
if (vgroup->vregion_rehash_enabled && ops->region_rehash_hints_get) {
+ struct mlxsw_sp_acl_tcam_rehash_ctx *ctx = &vregion->rehash.ctx;
+
mutex_lock(&tcam->lock);
list_del(&vregion->tlist);
mutex_unlock(&tcam->lock);
- cancel_delayed_work_sync(&vregion->rehash.dw);
+ if (cancel_delayed_work_sync(&vregion->rehash.dw) &&
+ ctx->hints_priv)
+ ops->region_rehash_hints_put(ctx->hints_priv);
}
mlxsw_sp_acl_tcam_vgroup_vregion_detach(mlxsw_sp, vregion);
if (vregion->region2)
@@ -1154,8 +1174,14 @@ mlxsw_sp_acl_tcam_ventry_activity_get(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_ventry *ventry,
bool *activity)
{
- return mlxsw_sp_acl_tcam_entry_activity_get(mlxsw_sp,
- ventry->entry, activity);
+ struct mlxsw_sp_acl_tcam_vregion *vregion = ventry->vchunk->vregion;
+ int err;
+
+ mutex_lock(&vregion->lock);
+ err = mlxsw_sp_acl_tcam_entry_activity_get(mlxsw_sp, ventry->entry,
+ activity);
+ mutex_unlock(&vregion->lock);
+ return err;
}
static int
@@ -1189,6 +1215,8 @@ mlxsw_sp_acl_tcam_vchunk_migrate_start(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_acl_tcam_chunk *new_chunk;
+ WARN_ON(vchunk->chunk2);
+
new_chunk = mlxsw_sp_acl_tcam_chunk_create(mlxsw_sp, vchunk, region);
if (IS_ERR(new_chunk))
return PTR_ERR(new_chunk);
@@ -1207,7 +1235,7 @@ mlxsw_sp_acl_tcam_vchunk_migrate_end(struct mlxsw_sp *mlxsw_sp,
{
mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk2);
vchunk->chunk2 = NULL;
- ctx->current_vchunk = NULL;
+ mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(ctx);
}
static int
@@ -1230,6 +1258,9 @@ mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp,
return 0;
}
+ if (list_empty(&vchunk->ventry_list))
+ goto out;
+
/* If the migration got interrupted, we have the ventry to start from
* stored in context.
*/
@@ -1239,6 +1270,8 @@ mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp,
ventry = list_first_entry(&vchunk->ventry_list,
typeof(*ventry), list);
+ WARN_ON(ventry->vchunk != vchunk);
+
list_for_each_entry_from(ventry, &vchunk->ventry_list, list) {
/* During rollback, once we reach the ventry that failed
* to migrate, we are done.
@@ -1279,6 +1312,7 @@ mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp,
}
}
+out:
mlxsw_sp_acl_tcam_vchunk_migrate_end(mlxsw_sp, vchunk, ctx);
return 0;
}
@@ -1292,6 +1326,9 @@ mlxsw_sp_acl_tcam_vchunk_migrate_all(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_vchunk *vchunk;
int err;
+ if (list_empty(&vregion->vchunk_list))
+ return 0;
+
/* If the migration got interrupted, we have the vchunk
* we are working on stored in context.
*/
@@ -1320,16 +1357,17 @@ mlxsw_sp_acl_tcam_vregion_migrate(struct mlxsw_sp *mlxsw_sp,
int err, err2;
trace_mlxsw_sp_acl_tcam_vregion_migrate(mlxsw_sp, vregion);
- mutex_lock(&vregion->lock);
err = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion,
ctx, credits);
if (err) {
+ if (ctx->this_is_rollback)
+ return err;
/* In case migration was not successful, we need to swap
* so the original region pointer is assigned again
* to vregion->region.
*/
swap(vregion->region, vregion->region2);
- ctx->current_vchunk = NULL;
+ mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(ctx);
ctx->this_is_rollback = true;
err2 = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion,
ctx, credits);
@@ -1340,7 +1378,6 @@ mlxsw_sp_acl_tcam_vregion_migrate(struct mlxsw_sp *mlxsw_sp,
/* Let the rollback to be continued later on. */
}
}
- mutex_unlock(&vregion->lock);
trace_mlxsw_sp_acl_tcam_vregion_migrate_end(mlxsw_sp, vregion);
return err;
}
@@ -1389,6 +1426,7 @@ mlxsw_sp_acl_tcam_vregion_rehash_start(struct mlxsw_sp *mlxsw_sp,
ctx->hints_priv = hints_priv;
ctx->this_is_rollback = false;
+ mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(ctx);
return 0;
@@ -1441,7 +1479,8 @@ mlxsw_sp_acl_tcam_vregion_rehash(struct mlxsw_sp *mlxsw_sp,
err = mlxsw_sp_acl_tcam_vregion_migrate(mlxsw_sp, vregion,
ctx, credits);
if (err) {
- dev_err(mlxsw_sp->bus_info->dev, "Failed to migrate vregion\n");
+ dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to migrate vregion\n");
+ return;
}
if (*credits >= 0)
@@ -1550,19 +1589,11 @@ int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
if (max_tcam_regions < max_regions)
max_regions = max_tcam_regions;
- tcam->used_regions = bitmap_zalloc(max_regions, GFP_KERNEL);
- if (!tcam->used_regions) {
- err = -ENOMEM;
- goto err_alloc_used_regions;
- }
+ ida_init(&tcam->used_regions);
tcam->max_regions = max_regions;
max_groups = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_GROUPS);
- tcam->used_groups = bitmap_zalloc(max_groups, GFP_KERNEL);
- if (!tcam->used_groups) {
- err = -ENOMEM;
- goto err_alloc_used_groups;
- }
+ ida_init(&tcam->used_groups);
tcam->max_groups = max_groups;
tcam->max_group_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
ACL_MAX_GROUP_SIZE);
@@ -1576,10 +1607,8 @@ int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
return 0;
err_tcam_init:
- bitmap_free(tcam->used_groups);
-err_alloc_used_groups:
- bitmap_free(tcam->used_regions);
-err_alloc_used_regions:
+ ida_destroy(&tcam->used_groups);
+ ida_destroy(&tcam->used_regions);
mlxsw_sp_acl_tcam_rehash_params_unregister(mlxsw_sp);
err_rehash_params_register:
mutex_destroy(&tcam->lock);
@@ -1592,8 +1621,8 @@ void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
ops->fini(mlxsw_sp, tcam->priv);
- bitmap_free(tcam->used_groups);
- bitmap_free(tcam->used_regions);
+ ida_destroy(&tcam->used_groups);
+ ida_destroy(&tcam->used_regions);
mlxsw_sp_acl_tcam_rehash_params_unregister(mlxsw_sp);
mutex_destroy(&tcam->lock);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
index 462bf448497d..79a1d8606512 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
@@ -6,15 +6,16 @@
#include <linux/list.h>
#include <linux/parman.h>
+#include <linux/idr.h>
#include "reg.h"
#include "spectrum.h"
#include "core_acl_flex_keys.h"
struct mlxsw_sp_acl_tcam {
- unsigned long *used_regions; /* bit array */
+ struct ida used_regions;
unsigned int max_regions;
- unsigned long *used_groups; /* bit array */
+ struct ida used_groups;
unsigned int max_groups;
unsigned int max_group_size;
struct mutex lock; /* guards vregion list */
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index b621ddd4539c..384ddad65aaf 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -2729,19 +2729,18 @@ static int ravb_setup_irq(struct ravb_private *priv, const char *irq_name,
struct platform_device *pdev = priv->pdev;
struct net_device *ndev = priv->ndev;
struct device *dev = &pdev->dev;
- const char *dev_name;
+ const char *devname = dev_name(dev);
unsigned long flags;
int error, irq_num;
if (irq_name) {
- dev_name = devm_kasprintf(dev, GFP_KERNEL, "%s:%s", ndev->name, ch);
- if (!dev_name)
+ devname = devm_kasprintf(dev, GFP_KERNEL, "%s:%s", devname, ch);
+ if (!devname)
return -ENOMEM;
irq_num = platform_get_irq_byname(pdev, irq_name);
flags = 0;
} else {
- dev_name = ndev->name;
irq_num = platform_get_irq(pdev, 0);
flags = IRQF_SHARED;
}
@@ -2751,9 +2750,9 @@ static int ravb_setup_irq(struct ravb_private *priv, const char *irq_name,
if (irq)
*irq = irq_num;
- error = devm_request_irq(dev, irq_num, handler, flags, dev_name, ndev);
+ error = devm_request_irq(dev, irq_num, handler, flags, devname, ndev);
if (error)
- netdev_err(ndev, "cannot request IRQ %s\n", dev_name);
+ netdev_err(ndev, "cannot request IRQ %s\n", devname);
return error;
}
diff --git a/drivers/net/ethernet/ti/am65-cpts.c b/drivers/net/ethernet/ti/am65-cpts.c
index 68891a227dc8..59d6ab989c55 100644
--- a/drivers/net/ethernet/ti/am65-cpts.c
+++ b/drivers/net/ethernet/ti/am65-cpts.c
@@ -791,6 +791,11 @@ static bool am65_cpts_match_tx_ts(struct am65_cpts *cpts,
struct am65_cpts_skb_cb_data *skb_cb =
(struct am65_cpts_skb_cb_data *)skb->cb;
+ if ((ptp_classify_raw(skb) & PTP_CLASS_V1) &&
+ ((mtype_seqid & AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK) ==
+ (skb_cb->skb_mtype_seqid & AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK)))
+ mtype_seqid = skb_cb->skb_mtype_seqid;
+
if (mtype_seqid == skb_cb->skb_mtype_seqid) {
u64 ns = event->timestamp;
diff --git a/drivers/net/ethernet/ti/icssg/icssg_common.c b/drivers/net/ethernet/ti/icssg/icssg_common.c
index 1d62c05b5f7c..284f97876054 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_common.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_common.c
@@ -417,12 +417,14 @@ int prueth_init_rx_chns(struct prueth_emac *emac,
if (!i)
fdqring_id = k3_udma_glue_rx_flow_get_fdq_id(rx_chn->rx_chn,
i);
- rx_chn->irq[i] = k3_udma_glue_rx_get_irq(rx_chn->rx_chn, i);
- if (rx_chn->irq[i] <= 0) {
- ret = rx_chn->irq[i];
+ ret = k3_udma_glue_rx_get_irq(rx_chn->rx_chn, i);
+ if (ret <= 0) {
+ if (!ret)
+ ret = -ENXIO;
netdev_err(ndev, "Failed to get rx dma irq");
goto fail;
}
+ rx_chn->irq[i] = ret;
}
return 0;
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
index 6dff2c85682d..6fae161cbcb8 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
@@ -1598,7 +1598,7 @@ static void wx_set_num_queues(struct wx *wx)
*/
static int wx_acquire_msix_vectors(struct wx *wx)
{
- struct irq_affinity affd = {0, };
+ struct irq_affinity affd = { .pre_vectors = 1 };
int nvecs, i;
/* We start by asking for one vector per queue pair */
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
index 2fa511227eac..93295916b1d2 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
@@ -20,8 +20,6 @@
#include "txgbe_phy.h"
#include "txgbe_hw.h"
-#define TXGBE_I2C_CLK_DEV_NAME "i2c_dw"
-
static int txgbe_swnodes_register(struct txgbe *txgbe)
{
struct txgbe_nodes *nodes = &txgbe->nodes;
@@ -573,8 +571,8 @@ static int txgbe_clock_register(struct txgbe *txgbe)
char clk_name[32];
struct clk *clk;
- snprintf(clk_name, sizeof(clk_name), "%s.%d",
- TXGBE_I2C_CLK_DEV_NAME, pci_dev_id(pdev));
+ snprintf(clk_name, sizeof(clk_name), "i2c_designware.%d",
+ pci_dev_id(pdev));
clk = clk_register_fixed_rate(NULL, clk_name, NULL, 0, 156250000);
if (IS_ERR(clk))
@@ -636,7 +634,7 @@ static int txgbe_i2c_register(struct txgbe *txgbe)
info.parent = &pdev->dev;
info.fwnode = software_node_fwnode(txgbe->nodes.group[SWNODE_I2C]);
- info.name = TXGBE_I2C_CLK_DEV_NAME;
+ info.name = "i2c_designware";
info.id = pci_dev_id(pdev);
info.res = &DEFINE_RES_IRQ(pdev->irq);
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index ba4704c2c640..e62d6cbdf9bc 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -1098,11 +1098,12 @@ out_hashtable:
static void gtp_dellink(struct net_device *dev, struct list_head *head)
{
struct gtp_dev *gtp = netdev_priv(dev);
+ struct hlist_node *next;
struct pdp_ctx *pctx;
int i;
for (i = 0; i < gtp->hash_size; i++)
- hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid)
+ hlist_for_each_entry_safe(pctx, next, &gtp->tid_hash[i], hlist_tid)
pdp_context_delete(pctx);
list_del_rcu(&gtp->list);
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 0206b84284ab..ff016c11b4a0 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -999,10 +999,12 @@ static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
struct metadata_dst *md_dst;
struct macsec_rxh_data *rxd;
struct macsec_dev *macsec;
+ bool is_macsec_md_dst;
rcu_read_lock();
rxd = macsec_data_rcu(skb->dev);
md_dst = skb_metadata_dst(skb);
+ is_macsec_md_dst = md_dst && md_dst->type == METADATA_MACSEC;
list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
struct sk_buff *nskb;
@@ -1013,14 +1015,42 @@ static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
* the SecTAG, so we have to deduce which port to deliver to.
*/
if (macsec_is_offloaded(macsec) && netif_running(ndev)) {
- struct macsec_rx_sc *rx_sc = NULL;
+ const struct macsec_ops *ops;
- if (md_dst && md_dst->type == METADATA_MACSEC)
- rx_sc = find_rx_sc(&macsec->secy, md_dst->u.macsec_info.sci);
+ ops = macsec_get_ops(macsec, NULL);
- if (md_dst && md_dst->type == METADATA_MACSEC && !rx_sc)
+ if (ops->rx_uses_md_dst && !is_macsec_md_dst)
continue;
+ if (is_macsec_md_dst) {
+ struct macsec_rx_sc *rx_sc;
+
+ /* All drivers that implement MACsec offload
+ * support using skb metadata destinations must
+ * indicate that they do so.
+ */
+ DEBUG_NET_WARN_ON_ONCE(!ops->rx_uses_md_dst);
+ rx_sc = find_rx_sc(&macsec->secy,
+ md_dst->u.macsec_info.sci);
+ if (!rx_sc)
+ continue;
+ /* device indicated macsec offload occurred */
+ skb->dev = ndev;
+ skb->pkt_type = PACKET_HOST;
+ eth_skb_pkt_type(skb, ndev);
+ ret = RX_HANDLER_ANOTHER;
+ goto out;
+ }
+
+ /* This datapath is insecure because it is unable to
+ * enforce isolation of broadcast/multicast traffic and
+ * unicast traffic with promiscuous mode on the macsec
+ * netdev. Since the core stack has no mechanism to
+ * check that the hardware did indeed receive MACsec
+ * traffic, it is possible that the response handling
+ * done by the MACsec port was to a plaintext packet.
+ * This violates the MACsec protocol standard.
+ */
if (ether_addr_equal_64bits(hdr->h_dest,
ndev->dev_addr)) {
/* exact match, divert skb to this port */
@@ -1036,14 +1066,10 @@ static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
break;
nskb->dev = ndev;
- if (ether_addr_equal_64bits(hdr->h_dest,
- ndev->broadcast))
- nskb->pkt_type = PACKET_BROADCAST;
- else
- nskb->pkt_type = PACKET_MULTICAST;
+ eth_skb_pkt_type(nskb, ndev);
__netif_rx(nskb);
- } else if (rx_sc || ndev->flags & IFF_PROMISC) {
+ } else if (ndev->flags & IFF_PROMISC) {
skb->dev = ndev;
skb->pkt_type = PACKET_HOST;
ret = RX_HANDLER_ANOTHER;
diff --git a/drivers/net/phy/dp83869.c b/drivers/net/phy/dp83869.c
index fa8c6fdcf301..d7aaefb5226b 100644
--- a/drivers/net/phy/dp83869.c
+++ b/drivers/net/phy/dp83869.c
@@ -695,7 +695,8 @@ static int dp83869_configure_mode(struct phy_device *phydev,
phy_ctrl_val = dp83869->mode;
if (phydev->interface == PHY_INTERFACE_MODE_MII) {
if (dp83869->mode == DP83869_100M_MEDIA_CONVERT ||
- dp83869->mode == DP83869_RGMII_100_BASE) {
+ dp83869->mode == DP83869_RGMII_100_BASE ||
+ dp83869->mode == DP83869_RGMII_COPPER_ETHERNET) {
phy_ctrl_val |= DP83869_OP_MODE_MII;
} else {
phydev_err(phydev, "selected op-mode is not valid with MII mode\n");
diff --git a/drivers/net/phy/mediatek-ge-soc.c b/drivers/net/phy/mediatek-ge-soc.c
index 0f3a1538a8b8..f4f9412d0cd7 100644
--- a/drivers/net/phy/mediatek-ge-soc.c
+++ b/drivers/net/phy/mediatek-ge-soc.c
@@ -216,6 +216,9 @@
#define MTK_PHY_LED_ON_LINK1000 BIT(0)
#define MTK_PHY_LED_ON_LINK100 BIT(1)
#define MTK_PHY_LED_ON_LINK10 BIT(2)
+#define MTK_PHY_LED_ON_LINK (MTK_PHY_LED_ON_LINK10 |\
+ MTK_PHY_LED_ON_LINK100 |\
+ MTK_PHY_LED_ON_LINK1000)
#define MTK_PHY_LED_ON_LINKDOWN BIT(3)
#define MTK_PHY_LED_ON_FDX BIT(4) /* Full duplex */
#define MTK_PHY_LED_ON_HDX BIT(5) /* Half duplex */
@@ -231,6 +234,12 @@
#define MTK_PHY_LED_BLINK_100RX BIT(3)
#define MTK_PHY_LED_BLINK_10TX BIT(4)
#define MTK_PHY_LED_BLINK_10RX BIT(5)
+#define MTK_PHY_LED_BLINK_RX (MTK_PHY_LED_BLINK_10RX |\
+ MTK_PHY_LED_BLINK_100RX |\
+ MTK_PHY_LED_BLINK_1000RX)
+#define MTK_PHY_LED_BLINK_TX (MTK_PHY_LED_BLINK_10TX |\
+ MTK_PHY_LED_BLINK_100TX |\
+ MTK_PHY_LED_BLINK_1000TX)
#define MTK_PHY_LED_BLINK_COLLISION BIT(6)
#define MTK_PHY_LED_BLINK_RX_CRC_ERR BIT(7)
#define MTK_PHY_LED_BLINK_RX_IDLE_ERR BIT(8)
@@ -1247,11 +1256,9 @@ static int mt798x_phy_led_hw_control_get(struct phy_device *phydev, u8 index,
if (blink < 0)
return -EIO;
- if ((on & (MTK_PHY_LED_ON_LINK1000 | MTK_PHY_LED_ON_LINK100 |
- MTK_PHY_LED_ON_LINK10)) ||
- (blink & (MTK_PHY_LED_BLINK_1000RX | MTK_PHY_LED_BLINK_100RX |
- MTK_PHY_LED_BLINK_10RX | MTK_PHY_LED_BLINK_1000TX |
- MTK_PHY_LED_BLINK_100TX | MTK_PHY_LED_BLINK_10TX)))
+ if ((on & (MTK_PHY_LED_ON_LINK | MTK_PHY_LED_ON_FDX | MTK_PHY_LED_ON_HDX |
+ MTK_PHY_LED_ON_LINKDOWN)) ||
+ (blink & (MTK_PHY_LED_BLINK_RX | MTK_PHY_LED_BLINK_TX)))
set_bit(bit_netdev, &priv->led_state);
else
clear_bit(bit_netdev, &priv->led_state);
@@ -1269,7 +1276,7 @@ static int mt798x_phy_led_hw_control_get(struct phy_device *phydev, u8 index,
if (!rules)
return 0;
- if (on & (MTK_PHY_LED_ON_LINK1000 | MTK_PHY_LED_ON_LINK100 | MTK_PHY_LED_ON_LINK10))
+ if (on & MTK_PHY_LED_ON_LINK)
*rules |= BIT(TRIGGER_NETDEV_LINK);
if (on & MTK_PHY_LED_ON_LINK10)
@@ -1287,10 +1294,10 @@ static int mt798x_phy_led_hw_control_get(struct phy_device *phydev, u8 index,
if (on & MTK_PHY_LED_ON_HDX)
*rules |= BIT(TRIGGER_NETDEV_HALF_DUPLEX);
- if (blink & (MTK_PHY_LED_BLINK_1000RX | MTK_PHY_LED_BLINK_100RX | MTK_PHY_LED_BLINK_10RX))
+ if (blink & MTK_PHY_LED_BLINK_RX)
*rules |= BIT(TRIGGER_NETDEV_RX);
- if (blink & (MTK_PHY_LED_BLINK_1000TX | MTK_PHY_LED_BLINK_100TX | MTK_PHY_LED_BLINK_10TX))
+ if (blink & MTK_PHY_LED_BLINK_TX)
*rules |= BIT(TRIGGER_NETDEV_TX);
return 0;
@@ -1323,15 +1330,19 @@ static int mt798x_phy_led_hw_control_set(struct phy_device *phydev, u8 index,
on |= MTK_PHY_LED_ON_LINK1000;
if (rules & BIT(TRIGGER_NETDEV_RX)) {
- blink |= MTK_PHY_LED_BLINK_10RX |
- MTK_PHY_LED_BLINK_100RX |
- MTK_PHY_LED_BLINK_1000RX;
+ blink |= (on & MTK_PHY_LED_ON_LINK) ?
+ (((on & MTK_PHY_LED_ON_LINK10) ? MTK_PHY_LED_BLINK_10RX : 0) |
+ ((on & MTK_PHY_LED_ON_LINK100) ? MTK_PHY_LED_BLINK_100RX : 0) |
+ ((on & MTK_PHY_LED_ON_LINK1000) ? MTK_PHY_LED_BLINK_1000RX : 0)) :
+ MTK_PHY_LED_BLINK_RX;
}
if (rules & BIT(TRIGGER_NETDEV_TX)) {
- blink |= MTK_PHY_LED_BLINK_10TX |
- MTK_PHY_LED_BLINK_100TX |
- MTK_PHY_LED_BLINK_1000TX;
+ blink |= (on & MTK_PHY_LED_ON_LINK) ?
+ (((on & MTK_PHY_LED_ON_LINK10) ? MTK_PHY_LED_BLINK_10TX : 0) |
+ ((on & MTK_PHY_LED_ON_LINK100) ? MTK_PHY_LED_BLINK_100TX : 0) |
+ ((on & MTK_PHY_LED_ON_LINK1000) ? MTK_PHY_LED_BLINK_1000TX : 0)) :
+ MTK_PHY_LED_BLINK_TX;
}
if (blink || on)
@@ -1344,9 +1355,7 @@ static int mt798x_phy_led_hw_control_set(struct phy_device *phydev, u8 index,
MTK_PHY_LED0_ON_CTRL,
MTK_PHY_LED_ON_FDX |
MTK_PHY_LED_ON_HDX |
- MTK_PHY_LED_ON_LINK10 |
- MTK_PHY_LED_ON_LINK100 |
- MTK_PHY_LED_ON_LINK1000,
+ MTK_PHY_LED_ON_LINK,
on);
if (ret)
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index a493fde1af3f..377be0d9ef14 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -1455,21 +1455,16 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
/* Skip IP alignment pseudo header */
skb_pull(skb, 2);
- skb->truesize = SKB_TRUESIZE(pkt_len_plus_padd);
ax88179_rx_checksum(skb, pkt_hdr);
return 1;
}
- ax_skb = skb_clone(skb, GFP_ATOMIC);
+ ax_skb = netdev_alloc_skb_ip_align(dev->net, pkt_len);
if (!ax_skb)
return 0;
- skb_trim(ax_skb, pkt_len);
+ skb_put(ax_skb, pkt_len);
+ memcpy(ax_skb->data, skb->data + 2, pkt_len);
- /* Skip IP alignment pseudo header */
- skb_pull(ax_skb, 2);
-
- skb->truesize = pkt_len_plus_padd +
- SKB_DATA_ALIGN(sizeof(struct sk_buff));
ax88179_rx_checksum(ax_skb, pkt_hdr);
usbnet_skb_return(dev, ax_skb);
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 5e0c35b9b8c5..663e46348ce3 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1360,6 +1360,9 @@ static const struct usb_device_id products[] = {
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1060, 2)}, /* Telit LN920 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1070, 2)}, /* Telit FN990 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1080, 2)}, /* Telit FE990 */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x10a0, 0)}, /* Telit FN920C04 */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x10a4, 0)}, /* Telit FN920C04 */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x10a9, 0)}, /* Telit FN920C04 */
{QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */
{QMI_FIXED_INTF(0x1bc7, 0x1101, 3)}, /* Telit ME910 dual modem */
{QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c
index 72ecf6cf809c..4bbfb516ac05 100644
--- a/drivers/net/vxlan/vxlan_core.c
+++ b/drivers/net/vxlan/vxlan_core.c
@@ -1616,6 +1616,10 @@ static bool vxlan_set_mac(struct vxlan_dev *vxlan,
if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr))
return false;
+ /* Ignore packets from invalid src-address */
+ if (!is_valid_ether_addr(eth_hdr(skb)->h_source))
+ return false;
+
/* Get address from the outer IP header */
if (vxlan_get_sk_family(vs) == AF_INET) {
saddr.sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
index c32be587000d..4fdd56cd30a1 100644
--- a/drivers/net/wireless/ath/ath11k/mac.c
+++ b/drivers/net/wireless/ath/ath11k/mac.c
@@ -9112,6 +9112,7 @@ static void ath11k_mac_op_ipv6_changed(struct ieee80211_hw *hw,
offload = &arvif->arp_ns_offload;
count = 0;
+ /* Note: read_lock_bh() calls rcu_read_lock() */
read_lock_bh(&idev->lock);
memset(offload->ipv6_addr, 0, sizeof(offload->ipv6_addr));
@@ -9142,7 +9143,8 @@ static void ath11k_mac_op_ipv6_changed(struct ieee80211_hw *hw,
}
/* get anycast address */
- for (ifaca6 = idev->ac_list; ifaca6; ifaca6 = ifaca6->aca_next) {
+ for (ifaca6 = rcu_dereference(idev->ac_list); ifaca6;
+ ifaca6 = rcu_dereference(ifaca6->aca_next)) {
if (count >= ATH11K_IPV6_MAX_COUNT)
goto generate;
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
index bc98b87cf2a1..6c01859b4323 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
@@ -10,7 +10,7 @@
#include "fw/api/txq.h"
/* Highest firmware API version supported */
-#define IWL_BZ_UCODE_API_MAX 90
+#define IWL_BZ_UCODE_API_MAX 89
/* Lowest firmware API version supported */
#define IWL_BZ_UCODE_API_MIN 80
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/sc.c b/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
index 9b79279fd76c..dbbcb2d0968c 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
@@ -10,7 +10,7 @@
#include "fw/api/txq.h"
/* Highest firmware API version supported */
-#define IWL_SC_UCODE_API_MAX 90
+#define IWL_SC_UCODE_API_MAX 89
/* Lowest firmware API version supported */
#define IWL_SC_UCODE_API_MIN 82
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
index c9000c878005..72a3d71f46f0 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
@@ -53,6 +53,8 @@ int iwl_mvm_ftm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (!pasn)
return -ENOBUFS;
+ iwl_mvm_ftm_remove_pasn_sta(mvm, addr);
+
pasn->cipher = iwl_mvm_cipher_to_location_cipher(cipher);
switch (pasn->cipher) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/link.c b/drivers/net/wireless/intel/iwlwifi/mvm/link.c
index ae45aa56d01d..84b497b22d83 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/link.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/link.c
@@ -279,6 +279,7 @@ int iwl_mvm_unset_link_mapping(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
RCU_INIT_POINTER(mvm->link_id_to_link_conf[link_info->fw_link_id],
NULL);
+ iwl_mvm_release_fw_link_id(mvm, link_info->fw_link_id);
return 0;
}
@@ -296,7 +297,6 @@ int iwl_mvm_remove_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return 0;
cmd.link_id = cpu_to_le32(link_info->fw_link_id);
- iwl_mvm_release_fw_link_id(mvm, link_info->fw_link_id);
link_info->fw_link_id = IWL_MVM_FW_LINK_ID_INVALID;
cmd.spec_link_id = link_conf->link_id;
cmd.phy_id = cpu_to_le32(FW_CTXT_INVALID);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index 7cdcad19deef..fd1c5808c72b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -2829,7 +2829,8 @@ static int iwl_mvm_build_scan_cmd(struct iwl_mvm *mvm,
if (ver_handler->version != scan_ver)
continue;
- return ver_handler->handler(mvm, vif, params, type, uid);
+ err = ver_handler->handler(mvm, vif, params, type, uid);
+ return err ? : uid;
}
err = iwl_mvm_scan_umac(mvm, vif, params, type, uid);
diff --git a/drivers/net/wireless/virtual/mac80211_hwsim.c b/drivers/net/wireless/virtual/mac80211_hwsim.c
index f5aed44ec80a..5b166532e8cd 100644
--- a/drivers/net/wireless/virtual/mac80211_hwsim.c
+++ b/drivers/net/wireless/virtual/mac80211_hwsim.c
@@ -3905,7 +3905,7 @@ static int hwsim_pmsr_report_nl(struct sk_buff *msg, struct genl_info *info)
}
nla_for_each_nested(peer, peers, rem) {
- struct cfg80211_pmsr_result result;
+ struct cfg80211_pmsr_result result = {};
err = mac80211_hwsim_parse_pmsr_result(peer, &result, info);
if (err)
diff --git a/drivers/nfc/trf7970a.c b/drivers/nfc/trf7970a.c
index 7eb17f46a815..9e1a34e23af2 100644
--- a/drivers/nfc/trf7970a.c
+++ b/drivers/nfc/trf7970a.c
@@ -424,7 +424,8 @@ struct trf7970a {
enum trf7970a_state state;
struct device *dev;
struct spi_device *spi;
- struct regulator *regulator;
+ struct regulator *vin_regulator;
+ struct regulator *vddio_regulator;
struct nfc_digital_dev *ddev;
u32 quirks;
bool is_initiator;
@@ -1883,7 +1884,7 @@ static int trf7970a_power_up(struct trf7970a *trf)
if (trf->state != TRF7970A_ST_PWR_OFF)
return 0;
- ret = regulator_enable(trf->regulator);
+ ret = regulator_enable(trf->vin_regulator);
if (ret) {
dev_err(trf->dev, "%s - Can't enable VIN: %d\n", __func__, ret);
return ret;
@@ -1926,7 +1927,7 @@ static int trf7970a_power_down(struct trf7970a *trf)
if (trf->en2_gpiod && !(trf->quirks & TRF7970A_QUIRK_EN2_MUST_STAY_LOW))
gpiod_set_value_cansleep(trf->en2_gpiod, 0);
- ret = regulator_disable(trf->regulator);
+ ret = regulator_disable(trf->vin_regulator);
if (ret)
dev_err(trf->dev, "%s - Can't disable VIN: %d\n", __func__,
ret);
@@ -2065,37 +2066,37 @@ static int trf7970a_probe(struct spi_device *spi)
mutex_init(&trf->lock);
INIT_DELAYED_WORK(&trf->timeout_work, trf7970a_timeout_work_handler);
- trf->regulator = devm_regulator_get(&spi->dev, "vin");
- if (IS_ERR(trf->regulator)) {
- ret = PTR_ERR(trf->regulator);
+ trf->vin_regulator = devm_regulator_get(&spi->dev, "vin");
+ if (IS_ERR(trf->vin_regulator)) {
+ ret = PTR_ERR(trf->vin_regulator);
dev_err(trf->dev, "Can't get VIN regulator: %d\n", ret);
goto err_destroy_lock;
}
- ret = regulator_enable(trf->regulator);
+ ret = regulator_enable(trf->vin_regulator);
if (ret) {
dev_err(trf->dev, "Can't enable VIN: %d\n", ret);
goto err_destroy_lock;
}
- uvolts = regulator_get_voltage(trf->regulator);
+ uvolts = regulator_get_voltage(trf->vin_regulator);
if (uvolts > 4000000)
trf->chip_status_ctrl = TRF7970A_CHIP_STATUS_VRS5_3;
- trf->regulator = devm_regulator_get(&spi->dev, "vdd-io");
- if (IS_ERR(trf->regulator)) {
- ret = PTR_ERR(trf->regulator);
+ trf->vddio_regulator = devm_regulator_get(&spi->dev, "vdd-io");
+ if (IS_ERR(trf->vddio_regulator)) {
+ ret = PTR_ERR(trf->vddio_regulator);
dev_err(trf->dev, "Can't get VDD_IO regulator: %d\n", ret);
- goto err_destroy_lock;
+ goto err_disable_vin_regulator;
}
- ret = regulator_enable(trf->regulator);
+ ret = regulator_enable(trf->vddio_regulator);
if (ret) {
dev_err(trf->dev, "Can't enable VDD_IO: %d\n", ret);
- goto err_destroy_lock;
+ goto err_disable_vin_regulator;
}
- if (regulator_get_voltage(trf->regulator) == 1800000) {
+ if (regulator_get_voltage(trf->vddio_regulator) == 1800000) {
trf->io_ctrl = TRF7970A_REG_IO_CTRL_IO_LOW;
dev_dbg(trf->dev, "trf7970a config vdd_io to 1.8V\n");
}
@@ -2108,7 +2109,7 @@ static int trf7970a_probe(struct spi_device *spi)
if (!trf->ddev) {
dev_err(trf->dev, "Can't allocate NFC digital device\n");
ret = -ENOMEM;
- goto err_disable_regulator;
+ goto err_disable_vddio_regulator;
}
nfc_digital_set_parent_dev(trf->ddev, trf->dev);
@@ -2137,8 +2138,10 @@ err_shutdown:
trf7970a_shutdown(trf);
err_free_ddev:
nfc_digital_free_device(trf->ddev);
-err_disable_regulator:
- regulator_disable(trf->regulator);
+err_disable_vddio_regulator:
+ regulator_disable(trf->vddio_regulator);
+err_disable_vin_regulator:
+ regulator_disable(trf->vin_regulator);
err_destroy_lock:
mutex_destroy(&trf->lock);
return ret;
@@ -2157,7 +2160,8 @@ static void trf7970a_remove(struct spi_device *spi)
nfc_digital_unregister_device(trf->ddev);
nfc_digital_free_device(trf->ddev);
- regulator_disable(trf->regulator);
+ regulator_disable(trf->vddio_regulator);
+ regulator_disable(trf->vin_regulator);
mutex_destroy(&trf->lock);
}
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index f95d12345d98..920f550bc313 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -363,10 +363,8 @@ int ccw_device_set_online(struct ccw_device *cdev)
spin_lock_irq(cdev->ccwlock);
ret = ccw_device_online(cdev);
- spin_unlock_irq(cdev->ccwlock);
- if (ret == 0)
- wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
- else {
+ if (ret) {
+ spin_unlock_irq(cdev->ccwlock);
CIO_MSG_EVENT(0, "ccw_device_online returned %d, "
"device 0.%x.%04x\n",
ret, cdev->private->dev_id.ssid,
@@ -375,7 +373,12 @@ int ccw_device_set_online(struct ccw_device *cdev)
put_device(&cdev->dev);
return ret;
}
- spin_lock_irq(cdev->ccwlock);
+ /* Wait until a final state is reached */
+ while (!dev_fsm_final_state(cdev)) {
+ spin_unlock_irq(cdev->ccwlock);
+ wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
+ spin_lock_irq(cdev->ccwlock);
+ }
/* Check if online processing was successful */
if ((cdev->private->state != DEV_STATE_ONLINE) &&
(cdev->private->state != DEV_STATE_W4SENSE)) {
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index 65d8b2cfd626..42791fa0b80e 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -504,6 +504,11 @@ callback:
ccw_device_done(cdev, DEV_STATE_ONLINE);
/* Deliver fake irb to device driver, if needed. */
if (cdev->private->flags.fake_irb) {
+ CIO_MSG_EVENT(2, "fakeirb: deliver device 0.%x.%04x intparm %lx type=%d\n",
+ cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno,
+ cdev->private->intparm,
+ cdev->private->flags.fake_irb);
create_fake_irb(&cdev->private->dma_area->irb,
cdev->private->flags.fake_irb);
cdev->private->flags.fake_irb = 0;
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index 40c97f873075..acd6790dba4d 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -208,6 +208,10 @@ int ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa,
if (!cdev->private->flags.fake_irb) {
cdev->private->flags.fake_irb = FAKE_CMD_IRB;
cdev->private->intparm = intparm;
+ CIO_MSG_EVENT(2, "fakeirb: queue device 0.%x.%04x intparm %lx type=%d\n",
+ cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno, intparm,
+ cdev->private->flags.fake_irb);
return 0;
} else
/* There's already a fake I/O around. */
@@ -551,6 +555,10 @@ int ccw_device_tm_start_timeout_key(struct ccw_device *cdev, struct tcw *tcw,
if (!cdev->private->flags.fake_irb) {
cdev->private->flags.fake_irb = FAKE_TM_IRB;
cdev->private->intparm = intparm;
+ CIO_MSG_EVENT(2, "fakeirb: queue device 0.%x.%04x intparm %lx type=%d\n",
+ cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno, intparm,
+ cdev->private->flags.fake_irb);
return 0;
} else
/* There's already a fake I/O around. */
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 3d9f0834c78b..a1cb39f4b7a2 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -722,8 +722,8 @@ static void qdio_handle_activate_check(struct qdio_irq *irq_ptr,
lgr_info_log();
}
-static void qdio_establish_handle_irq(struct qdio_irq *irq_ptr, int cstat,
- int dstat)
+static int qdio_establish_handle_irq(struct qdio_irq *irq_ptr, int cstat,
+ int dstat, int dcc)
{
DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq");
@@ -731,15 +731,18 @@ static void qdio_establish_handle_irq(struct qdio_irq *irq_ptr, int cstat,
goto error;
if (dstat & ~(DEV_STAT_DEV_END | DEV_STAT_CHN_END))
goto error;
+ if (dcc == 1)
+ return -EAGAIN;
if (!(dstat & DEV_STAT_DEV_END))
goto error;
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED);
- return;
+ return 0;
error:
DBF_ERROR("%4x EQ:error", irq_ptr->schid.sch_no);
DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
+ return -EIO;
}
/* qdio interrupt handler */
@@ -748,7 +751,7 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
{
struct qdio_irq *irq_ptr = cdev->private->qdio_data;
struct subchannel_id schid;
- int cstat, dstat;
+ int cstat, dstat, rc, dcc;
if (!intparm || !irq_ptr) {
ccw_device_get_schid(cdev, &schid);
@@ -768,10 +771,12 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
qdio_irq_check_sense(irq_ptr, irb);
cstat = irb->scsw.cmd.cstat;
dstat = irb->scsw.cmd.dstat;
+ dcc = scsw_cmd_is_valid_cc(&irb->scsw) ? irb->scsw.cmd.cc : 0;
+ rc = 0;
switch (irq_ptr->state) {
case QDIO_IRQ_STATE_INACTIVE:
- qdio_establish_handle_irq(irq_ptr, cstat, dstat);
+ rc = qdio_establish_handle_irq(irq_ptr, cstat, dstat, dcc);
break;
case QDIO_IRQ_STATE_CLEANUP:
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
@@ -785,12 +790,25 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
if (cstat || dstat)
qdio_handle_activate_check(irq_ptr, intparm, cstat,
dstat);
+ else if (dcc == 1)
+ rc = -EAGAIN;
break;
case QDIO_IRQ_STATE_STOPPED:
break;
default:
WARN_ON_ONCE(1);
}
+
+ if (rc == -EAGAIN) {
+ DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qint retry");
+ rc = ccw_device_start(cdev, irq_ptr->ccw, intparm, 0, 0);
+ if (!rc)
+ return;
+ DBF_ERROR("%4x RETRY ERR", irq_ptr->schid.sch_no);
+ DBF_ERROR("rc:%4x", rc);
+ qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
+ }
+
wake_up(&cdev->private->wait_q);
}
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 2e28e2360c85..5b3230ef51fe 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -635,10 +635,9 @@ static bool scsi_end_request(struct request *req, blk_status_t error,
if (blk_queue_add_random(q))
add_disk_randomness(req->q->disk);
- if (!blk_rq_is_passthrough(req)) {
- WARN_ON_ONCE(!(cmd->flags & SCMD_INITIALIZED));
- cmd->flags &= ~SCMD_INITIALIZED;
- }
+ WARN_ON_ONCE(!blk_rq_is_passthrough(req) &&
+ !(cmd->flags & SCMD_INITIALIZED));
+ cmd->flags = 0;
/*
* Calling rcu_barrier() is not necessary here because the
diff --git a/drivers/thermal/thermal_debugfs.c b/drivers/thermal/thermal_debugfs.c
index c617e8b9f0dd..d78d54ae2605 100644
--- a/drivers/thermal/thermal_debugfs.c
+++ b/drivers/thermal/thermal_debugfs.c
@@ -616,6 +616,7 @@ void thermal_debug_tz_trip_up(struct thermal_zone_device *tz,
tze->trip_stats[trip_id].timestamp = now;
tze->trip_stats[trip_id].max = max(tze->trip_stats[trip_id].max, temperature);
tze->trip_stats[trip_id].min = min(tze->trip_stats[trip_id].min, temperature);
+ tze->trip_stats[trip_id].count++;
tze->trip_stats[trip_id].avg = tze->trip_stats[trip_id].avg +
(temperature - tze->trip_stats[trip_id].avg) /
tze->trip_stats[trip_id].count;
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index 6ffc4e81ffed..326433df5880 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -3180,22 +3180,29 @@ void tb_switch_unconfigure_link(struct tb_switch *sw)
{
struct tb_port *up, *down;
- if (sw->is_unplugged)
- return;
if (!tb_route(sw) || tb_switch_is_icm(sw))
return;
+ /*
+ * Unconfigure downstream port so that wake-on-connect can be
+ * configured after router unplug. No need to unconfigure upstream port
+ * since its router is unplugged.
+ */
up = tb_upstream_port(sw);
- if (tb_switch_is_usb4(up->sw))
- usb4_port_unconfigure(up);
- else
- tb_lc_unconfigure_port(up);
-
down = up->remote;
if (tb_switch_is_usb4(down->sw))
usb4_port_unconfigure(down);
else
tb_lc_unconfigure_port(down);
+
+ if (sw->is_unplugged)
+ return;
+
+ up = tb_upstream_port(sw);
+ if (tb_switch_is_usb4(up->sw))
+ usb4_port_unconfigure(up);
+ else
+ tb_lc_unconfigure_port(up);
}
static void tb_switch_credits_init(struct tb_switch *sw)
@@ -3441,7 +3448,26 @@ static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags)
return tb_lc_set_wake(sw, flags);
}
-int tb_switch_resume(struct tb_switch *sw)
+static void tb_switch_check_wakes(struct tb_switch *sw)
+{
+ if (device_may_wakeup(&sw->dev)) {
+ if (tb_switch_is_usb4(sw))
+ usb4_switch_check_wakes(sw);
+ }
+}
+
+/**
+ * tb_switch_resume() - Resume a switch after sleep
+ * @sw: Switch to resume
+ * @runtime: Is this resume from runtime suspend or system sleep
+ *
+ * Resumes and re-enumerates the router (and all its children) if it is still
+ * plugged in after suspend. Does not enumerate a device router whose UID
+ * changed during suspend. If this is a resume from system sleep, notifies the
+ * PM core about wakes that occurred during suspend. Disables all wakes except
+ * the USB4 wake of the upstream port, which must always stay enabled on USB4
+ * routers.
+ */
+int tb_switch_resume(struct tb_switch *sw, bool runtime)
{
struct tb_port *port;
int err;
@@ -3490,6 +3516,9 @@ int tb_switch_resume(struct tb_switch *sw)
if (err)
return err;
+ if (!runtime)
+ tb_switch_check_wakes(sw);
+
/* Disable wakes */
tb_switch_set_wake(sw, 0);
@@ -3519,7 +3548,8 @@ int tb_switch_resume(struct tb_switch *sw)
*/
if (tb_port_unlock(port))
tb_port_warn(port, "failed to unlock port\n");
- if (port->remote && tb_switch_resume(port->remote->sw)) {
+ if (port->remote &&
+ tb_switch_resume(port->remote->sw, runtime)) {
tb_port_warn(port,
"lost during suspend, disconnecting\n");
tb_sw_set_unplugged(port->remote->sw);
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index c5ce7a694b27..3e44c78ac409 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -1801,6 +1801,12 @@ static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
continue;
}
+ /* Needs to be on different routers */
+ if (in->sw == port->sw) {
+ tb_port_dbg(port, "skipping DP OUT on same router\n");
+ continue;
+ }
+
tb_port_dbg(port, "DP OUT available\n");
/*
@@ -2936,7 +2942,7 @@ static int tb_resume_noirq(struct tb *tb)
if (!tb_switch_is_usb4(tb->root_switch))
tb_switch_reset(tb->root_switch);
- tb_switch_resume(tb->root_switch);
+ tb_switch_resume(tb->root_switch, false);
tb_free_invalid_tunnels(tb);
tb_free_unplugged_children(tb->root_switch);
tb_restore_children(tb->root_switch);
@@ -3062,7 +3068,7 @@ static int tb_runtime_resume(struct tb *tb)
struct tb_tunnel *tunnel, *n;
mutex_lock(&tb->lock);
- tb_switch_resume(tb->root_switch);
+ tb_switch_resume(tb->root_switch, true);
tb_free_invalid_tunnels(tb);
tb_restore_children(tb->root_switch);
list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index feed8ecaf712..18aae4ccaed5 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -827,7 +827,7 @@ int tb_switch_configuration_valid(struct tb_switch *sw);
int tb_switch_add(struct tb_switch *sw);
void tb_switch_remove(struct tb_switch *sw);
void tb_switch_suspend(struct tb_switch *sw, bool runtime);
-int tb_switch_resume(struct tb_switch *sw);
+int tb_switch_resume(struct tb_switch *sw, bool runtime);
int tb_switch_reset(struct tb_switch *sw);
int tb_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit,
u32 value, int timeout_msec);
@@ -1288,6 +1288,7 @@ static inline bool tb_switch_is_usb4(const struct tb_switch *sw)
return usb4_switch_version(sw) > 0;
}
+void usb4_switch_check_wakes(struct tb_switch *sw);
int usb4_switch_setup(struct tb_switch *sw);
int usb4_switch_configuration_valid(struct tb_switch *sw);
int usb4_switch_read_uid(struct tb_switch *sw, u64 *uid);
diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c
index 9860b49d7a2b..78b06e922fda 100644
--- a/drivers/thunderbolt/usb4.c
+++ b/drivers/thunderbolt/usb4.c
@@ -155,7 +155,13 @@ static inline int usb4_switch_op_data(struct tb_switch *sw, u16 opcode,
tx_dwords, rx_data, rx_dwords);
}
-static void usb4_switch_check_wakes(struct tb_switch *sw)
+/**
+ * usb4_switch_check_wakes() - Check for wakes and notify PM core about them
+ * @sw: Router whose wakes to check
+ *
+ * Checks for wakes that occurred during suspend and notifies the PM core about them.
+ */
+void usb4_switch_check_wakes(struct tb_switch *sw)
{
bool wakeup_usb4 = false;
struct usb4_port *usb4;
@@ -163,9 +169,6 @@ static void usb4_switch_check_wakes(struct tb_switch *sw)
bool wakeup = false;
u32 val;
- if (!device_may_wakeup(&sw->dev))
- return;
-
if (tb_route(sw)) {
if (tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_6, 1))
return;
@@ -244,8 +247,6 @@ int usb4_switch_setup(struct tb_switch *sw)
u32 val = 0;
int ret;
- usb4_switch_check_wakes(sw);
-
if (!tb_route(sw))
return 0;
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index a3acbf0f5da1..1300c92b8702 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -356,9 +356,9 @@ static void dw8250_set_termios(struct uart_port *p, struct ktermios *termios,
long rate;
int ret;
+ clk_disable_unprepare(d->clk);
rate = clk_round_rate(d->clk, newrate);
- if (rate > 0 && p->uartclk != rate) {
- clk_disable_unprepare(d->clk);
+ if (rate > 0) {
/*
* Note that any clock-notifier worker will block in
* serial8250_update_uartclk() until we are done.
@@ -366,8 +366,8 @@ static void dw8250_set_termios(struct uart_port *p, struct ktermios *termios,
ret = clk_set_rate(d->clk, newrate);
if (!ret)
p->uartclk = rate;
- clk_prepare_enable(d->clk);
}
+ clk_prepare_enable(d->clk);
dw8250_do_set_termios(p, termios, old);
}
diff --git a/drivers/tty/serial/8250/8250_lpc18xx.c b/drivers/tty/serial/8250/8250_lpc18xx.c
index 7984ee05af1d..47e1a056a60c 100644
--- a/drivers/tty/serial/8250/8250_lpc18xx.c
+++ b/drivers/tty/serial/8250/8250_lpc18xx.c
@@ -151,7 +151,7 @@ static int lpc18xx_serial_probe(struct platform_device *pdev)
ret = uart_read_port_properties(&uart.port);
if (ret)
- return ret;
+ goto dis_uart_clk;
uart.port.iotype = UPIO_MEM32;
uart.port.regshift = 2;
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 0d35c77fad9e..e2e4f99f9d34 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -5010,12 +5010,6 @@ static const struct pci_device_id serial_pci_tbl[] = {
{ PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_QUATRO_B,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_b0_bt_2_115200 },
- { PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_QUATTRO_A,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_b0_bt_2_115200 },
- { PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_QUATTRO_B,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_b0_bt_2_115200 },
{ PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_OCTO_A,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_b0_bt_4_460800 },
diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
index 4749331fe618..1e8853eae504 100644
--- a/drivers/tty/serial/mxs-auart.c
+++ b/drivers/tty/serial/mxs-auart.c
@@ -1086,11 +1086,13 @@ static void mxs_auart_set_ldisc(struct uart_port *port,
static irqreturn_t mxs_auart_irq_handle(int irq, void *context)
{
- u32 istat;
+ u32 istat, stat;
struct mxs_auart_port *s = context;
u32 mctrl_temp = s->mctrl_prev;
- u32 stat = mxs_read(s, REG_STAT);
+ uart_port_lock(&s->port);
+
+ stat = mxs_read(s, REG_STAT);
istat = mxs_read(s, REG_INTR);
/* ack irq */
@@ -1126,6 +1128,8 @@ static irqreturn_t mxs_auart_irq_handle(int irq, void *context)
istat &= ~AUART_INTR_TXIS;
}
+ uart_port_unlock(&s->port);
+
return IRQ_HANDLED;
}
diff --git a/drivers/tty/serial/pmac_zilog.c b/drivers/tty/serial/pmac_zilog.c
index 05d97e89511e..92195f984de1 100644
--- a/drivers/tty/serial/pmac_zilog.c
+++ b/drivers/tty/serial/pmac_zilog.c
@@ -210,7 +210,6 @@ static bool pmz_receive_chars(struct uart_pmac_port *uap)
{
struct tty_port *port;
unsigned char ch, r1, drop, flag;
- int loops = 0;
/* Sanity check, make sure the old bug is no longer happening */
if (uap->port.state == NULL) {
@@ -291,25 +290,12 @@ static bool pmz_receive_chars(struct uart_pmac_port *uap)
if (r1 & Rx_OVR)
tty_insert_flip_char(port, 0, TTY_OVERRUN);
next_char:
- /* We can get stuck in an infinite loop getting char 0 when the
- * line is in a wrong HW state, we break that here.
- * When that happens, I disable the receive side of the driver.
- * Note that what I've been experiencing is a real irq loop where
- * I'm getting flooded regardless of the actual port speed.
- * Something strange is going on with the HW
- */
- if ((++loops) > 1000)
- goto flood;
ch = read_zsreg(uap, R0);
if (!(ch & Rx_CH_AV))
break;
}
return true;
- flood:
- pmz_interrupt_control(uap, 0);
- pmz_error("pmz: rx irq flood !\n");
- return true;
}
static void pmz_status_handle(struct uart_pmac_port *uap)
diff --git a/drivers/tty/serial/serial_base.h b/drivers/tty/serial/serial_base.h
index c74c548f0db6..b6c38d2edfd4 100644
--- a/drivers/tty/serial/serial_base.h
+++ b/drivers/tty/serial/serial_base.h
@@ -22,6 +22,7 @@ struct serial_ctrl_device {
struct serial_port_device {
struct device dev;
struct uart_port *port;
+ unsigned int tx_enabled:1;
};
int serial_base_ctrl_init(void);
@@ -30,6 +31,9 @@ void serial_base_ctrl_exit(void);
int serial_base_port_init(void);
void serial_base_port_exit(void);
+void serial_base_port_startup(struct uart_port *port);
+void serial_base_port_shutdown(struct uart_port *port);
+
int serial_base_driver_register(struct device_driver *driver);
void serial_base_driver_unregister(struct device_driver *driver);
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index ff85ebd3a007..c476d884356d 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -156,7 +156,7 @@ static void __uart_start(struct uart_state *state)
* enabled, serial_port_runtime_resume() calls start_tx() again
* after enabling the device.
*/
- if (pm_runtime_active(&port_dev->dev))
+ if (!pm_runtime_enabled(port->dev) || pm_runtime_active(&port_dev->dev))
port->ops->start_tx(port);
pm_runtime_mark_last_busy(&port_dev->dev);
pm_runtime_put_autosuspend(&port_dev->dev);
@@ -323,16 +323,26 @@ static int uart_startup(struct tty_struct *tty, struct uart_state *state,
bool init_hw)
{
struct tty_port *port = &state->port;
+ struct uart_port *uport;
int retval;
if (tty_port_initialized(port))
- return 0;
+ goto out_base_port_startup;
retval = uart_port_startup(tty, state, init_hw);
- if (retval)
+ if (retval) {
set_bit(TTY_IO_ERROR, &tty->flags);
+ return retval;
+ }
- return retval;
+out_base_port_startup:
+ uport = uart_port_check(state);
+ if (!uport)
+ return -EIO;
+
+ serial_base_port_startup(uport);
+
+ return 0;
}
/*
@@ -355,6 +365,9 @@ static void uart_shutdown(struct tty_struct *tty, struct uart_state *state)
if (tty)
set_bit(TTY_IO_ERROR, &tty->flags);
+ if (uport)
+ serial_base_port_shutdown(uport);
+
if (tty_port_initialized(port)) {
tty_port_set_initialized(port, false);
@@ -1775,6 +1788,7 @@ static void uart_tty_port_shutdown(struct tty_port *port)
uport->ops->stop_rx(uport);
uart_port_unlock_irq(uport);
+ serial_base_port_shutdown(uport);
uart_port_shutdown(port);
/*
@@ -1788,6 +1802,7 @@ static void uart_tty_port_shutdown(struct tty_port *port)
* Free the transmit buffer.
*/
uart_port_lock_irq(uport);
+ uart_circ_clear(&state->xmit);
buf = state->xmit.buf;
state->xmit.buf = NULL;
uart_port_unlock_irq(uport);
diff --git a/drivers/tty/serial/serial_port.c b/drivers/tty/serial/serial_port.c
index 22b9eeb23e68..7e3a1c7b097c 100644
--- a/drivers/tty/serial/serial_port.c
+++ b/drivers/tty/serial/serial_port.c
@@ -39,8 +39,12 @@ static int serial_port_runtime_resume(struct device *dev)
/* Flush any pending TX for the port */
uart_port_lock_irqsave(port, &flags);
+ if (!port_dev->tx_enabled)
+ goto unlock;
if (__serial_port_busy(port))
port->ops->start_tx(port);
+
+unlock:
uart_port_unlock_irqrestore(port, flags);
out:
@@ -60,6 +64,11 @@ static int serial_port_runtime_suspend(struct device *dev)
return 0;
uart_port_lock_irqsave(port, &flags);
+ if (!port_dev->tx_enabled) {
+ uart_port_unlock_irqrestore(port, flags);
+ return 0;
+ }
+
busy = __serial_port_busy(port);
if (busy)
port->ops->start_tx(port);
@@ -71,6 +80,31 @@ static int serial_port_runtime_suspend(struct device *dev)
return busy ? -EBUSY : 0;
}
+static void serial_base_port_set_tx(struct uart_port *port,
+ struct serial_port_device *port_dev,
+ bool enabled)
+{
+ unsigned long flags;
+
+ uart_port_lock_irqsave(port, &flags);
+ port_dev->tx_enabled = enabled;
+ uart_port_unlock_irqrestore(port, flags);
+}
+
+void serial_base_port_startup(struct uart_port *port)
+{
+ struct serial_port_device *port_dev = port->port_dev;
+
+ serial_base_port_set_tx(port, port_dev, true);
+}
+
+void serial_base_port_shutdown(struct uart_port *port)
+{
+ struct serial_port_device *port_dev = port->port_dev;
+
+ serial_base_port_set_tx(port, port_dev, false);
+}
+
static DEFINE_RUNTIME_DEV_PM_OPS(serial_port_pm,
serial_port_runtime_suspend,
serial_port_runtime_resume, NULL);
diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
index 58d169e5c1db..4fa5a03ebac0 100644
--- a/drivers/tty/serial/stm32-usart.c
+++ b/drivers/tty/serial/stm32-usart.c
@@ -861,6 +861,7 @@ static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
u32 sr;
unsigned int size;
+ irqreturn_t ret = IRQ_NONE;
sr = readl_relaxed(port->membase + ofs->isr);
@@ -869,11 +870,14 @@ static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
(sr & USART_SR_TC)) {
stm32_usart_tc_interrupt_disable(port);
stm32_usart_rs485_rts_disable(port);
+ ret = IRQ_HANDLED;
}
- if ((sr & USART_SR_RTOF) && ofs->icr != UNDEF_REG)
+ if ((sr & USART_SR_RTOF) && ofs->icr != UNDEF_REG) {
writel_relaxed(USART_ICR_RTOCF,
port->membase + ofs->icr);
+ ret = IRQ_HANDLED;
+ }
if ((sr & USART_SR_WUF) && ofs->icr != UNDEF_REG) {
/* Clear wake up flag and disable wake up interrupt */
@@ -882,6 +886,7 @@ static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_WUFIE);
if (irqd_is_wakeup_set(irq_get_irq_data(port->irq)))
pm_wakeup_event(tport->tty->dev, 0);
+ ret = IRQ_HANDLED;
}
/*
@@ -896,6 +901,7 @@ static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
uart_unlock_and_check_sysrq(port);
if (size)
tty_flip_buffer_push(tport);
+ ret = IRQ_HANDLED;
}
}
@@ -903,6 +909,7 @@ static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
uart_port_lock(port);
stm32_usart_transmit_chars(port);
uart_port_unlock(port);
+ ret = IRQ_HANDLED;
}
/* Receiver timeout irq for DMA RX */
@@ -912,9 +919,10 @@ static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
uart_unlock_and_check_sysrq(port);
if (size)
tty_flip_buffer_push(tport);
+ ret = IRQ_HANDLED;
}
- return IRQ_HANDLED;
+ return ret;
}
static void stm32_usart_set_mctrl(struct uart_port *port, unsigned int mctrl)
@@ -1084,6 +1092,7 @@ static int stm32_usart_startup(struct uart_port *port)
val |= USART_CR2_SWAP;
writel_relaxed(val, port->membase + ofs->cr2);
}
+ stm32_port->throttled = false;
/* RX FIFO Flush */
if (ofs->rqr != UNDEF_REG)
diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
index 06859e17b67b..7a00004bfd03 100644
--- a/drivers/ufs/host/ufs-qcom.c
+++ b/drivers/ufs/host/ufs-qcom.c
@@ -47,7 +47,7 @@ enum {
TSTBUS_MAX,
};
-#define QCOM_UFS_MAX_GEAR 4
+#define QCOM_UFS_MAX_GEAR 5
#define QCOM_UFS_MAX_LANE 2
enum {
@@ -67,26 +67,32 @@ static const struct __ufs_qcom_bw_table {
[MODE_PWM][UFS_PWM_G2][UFS_LANE_1] = { 1844, 1000 },
[MODE_PWM][UFS_PWM_G3][UFS_LANE_1] = { 3688, 1000 },
[MODE_PWM][UFS_PWM_G4][UFS_LANE_1] = { 7376, 1000 },
+ [MODE_PWM][UFS_PWM_G5][UFS_LANE_1] = { 14752, 1000 },
[MODE_PWM][UFS_PWM_G1][UFS_LANE_2] = { 1844, 1000 },
[MODE_PWM][UFS_PWM_G2][UFS_LANE_2] = { 3688, 1000 },
[MODE_PWM][UFS_PWM_G3][UFS_LANE_2] = { 7376, 1000 },
[MODE_PWM][UFS_PWM_G4][UFS_LANE_2] = { 14752, 1000 },
+ [MODE_PWM][UFS_PWM_G5][UFS_LANE_2] = { 29504, 1000 },
[MODE_HS_RA][UFS_HS_G1][UFS_LANE_1] = { 127796, 1000 },
[MODE_HS_RA][UFS_HS_G2][UFS_LANE_1] = { 255591, 1000 },
[MODE_HS_RA][UFS_HS_G3][UFS_LANE_1] = { 1492582, 102400 },
[MODE_HS_RA][UFS_HS_G4][UFS_LANE_1] = { 2915200, 204800 },
+ [MODE_HS_RA][UFS_HS_G5][UFS_LANE_1] = { 5836800, 409600 },
[MODE_HS_RA][UFS_HS_G1][UFS_LANE_2] = { 255591, 1000 },
[MODE_HS_RA][UFS_HS_G2][UFS_LANE_2] = { 511181, 1000 },
[MODE_HS_RA][UFS_HS_G3][UFS_LANE_2] = { 1492582, 204800 },
[MODE_HS_RA][UFS_HS_G4][UFS_LANE_2] = { 2915200, 409600 },
+ [MODE_HS_RA][UFS_HS_G5][UFS_LANE_2] = { 5836800, 819200 },
[MODE_HS_RB][UFS_HS_G1][UFS_LANE_1] = { 149422, 1000 },
[MODE_HS_RB][UFS_HS_G2][UFS_LANE_1] = { 298189, 1000 },
[MODE_HS_RB][UFS_HS_G3][UFS_LANE_1] = { 1492582, 102400 },
[MODE_HS_RB][UFS_HS_G4][UFS_LANE_1] = { 2915200, 204800 },
+ [MODE_HS_RB][UFS_HS_G5][UFS_LANE_1] = { 5836800, 409600 },
[MODE_HS_RB][UFS_HS_G1][UFS_LANE_2] = { 298189, 1000 },
[MODE_HS_RB][UFS_HS_G2][UFS_LANE_2] = { 596378, 1000 },
[MODE_HS_RB][UFS_HS_G3][UFS_LANE_2] = { 1492582, 204800 },
[MODE_HS_RB][UFS_HS_G4][UFS_LANE_2] = { 2915200, 409600 },
+ [MODE_HS_RB][UFS_HS_G5][UFS_LANE_2] = { 5836800, 819200 },
[MODE_MAX][0][0] = { 7643136, 307200 },
};
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index c8262e2f2917..c553decb5461 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -485,7 +485,6 @@ out_free_mem:
static int service_outstanding_interrupt(struct wdm_device *desc)
{
int rv = 0;
- int used;
/* submit read urb only if the device is waiting for it */
if (!desc->resp_count || !--desc->resp_count)
@@ -500,10 +499,7 @@ static int service_outstanding_interrupt(struct wdm_device *desc)
goto out;
}
- used = test_and_set_bit(WDM_RESPONDING, &desc->flags);
- if (used)
- goto out;
-
+ set_bit(WDM_RESPONDING, &desc->flags);
spin_unlock_irq(&desc->iuspin);
rv = usb_submit_urb(desc->response, GFP_KERNEL);
spin_lock_irq(&desc->iuspin);
diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c
index 686c01af03e6..0e1262a077ae 100644
--- a/drivers/usb/core/port.c
+++ b/drivers/usb/core/port.c
@@ -449,8 +449,10 @@ static void usb_port_shutdown(struct device *dev)
{
struct usb_port *port_dev = to_usb_port(dev);
- if (port_dev->child)
+ if (port_dev->child) {
usb_disable_usb2_hardware_lpm(port_dev->child);
+ usb_unlocked_disable_lpm(port_dev->child);
+ }
}
static const struct dev_pm_ops usb_port_pm_ops = {
diff --git a/drivers/usb/dwc2/hcd_ddma.c b/drivers/usb/dwc2/hcd_ddma.c
index 79582b102c7e..994a78ad084b 100644
--- a/drivers/usb/dwc2/hcd_ddma.c
+++ b/drivers/usb/dwc2/hcd_ddma.c
@@ -867,13 +867,15 @@ static int dwc2_cmpl_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
struct dwc2_dma_desc *dma_desc;
struct dwc2_hcd_iso_packet_desc *frame_desc;
u16 frame_desc_idx;
- struct urb *usb_urb = qtd->urb->priv;
+ struct urb *usb_urb;
u16 remain = 0;
int rc = 0;
if (!qtd->urb)
return -EINVAL;
+ usb_urb = qtd->urb->priv;
+
dma_sync_single_for_cpu(hsotg->dev, qh->desc_list_dma + (idx *
sizeof(struct dwc2_dma_desc)),
sizeof(struct dwc2_dma_desc),
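Editor's note: the dwc2 change only moves the qtd->urb->priv load below the existing !qtd->urb check, so a NULL urb can no longer be dereferenced while initializing usb_urb. A minimal standalone sketch of the same ordering rule; the struct and field names here are illustrative, not the driver's:

	/* Illustrative only: dereference a pointer only after the NULL check. */
	#include <stddef.h>

	struct pkt { void *priv; };

	static int handle(struct pkt *p)
	{
		void *priv;

		if (!p)			/* check first ...     */
			return -1;

		priv = p->priv;		/* ... then dereference */
		return priv ? 0 : 1;
	}

	int main(void)
	{
		return handle(NULL) == -1 ? 0 : 1;
	}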
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index 72bb722da2f2..d96ffbe52039 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -226,7 +226,8 @@ void dwc3_ep0_stall_and_restart(struct dwc3 *dwc)
/* reinitialize physical ep1 */
dep = dwc->eps[1];
- dep->flags = DWC3_EP_ENABLED;
+ dep->flags &= DWC3_EP_RESOURCE_ALLOCATED;
+ dep->flags |= DWC3_EP_ENABLED;
/* stall is always issued on EP0 */
dep = dwc->eps[0];
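Editor's note: the ep0 change replaces a plain assignment with a mask-then-set, so DWC3_EP_RESOURCE_ALLOCATED survives the restart while every other stale flag is still cleared and DWC3_EP_ENABLED is re-applied. A minimal sketch of the difference, using made-up bit values in place of the real DWC3_EP_* definitions:

	/* Illustrative flag values; the real DWC3_EP_* bits differ. */
	#include <assert.h>

	#define EP_ENABLED		(1u << 0)
	#define EP_RESOURCE_ALLOCATED	(1u << 5)

	static unsigned int restart_old(unsigned int flags)
	{
		return EP_ENABLED;			/* drops RESOURCE_ALLOCATED */
	}

	static unsigned int restart_new(unsigned int flags)
	{
		flags &= EP_RESOURCE_ALLOCATED;		/* keep only that bit */
		flags |= EP_ENABLED;			/* then mark enabled  */
		return flags;
	}

	int main(void)
	{
		unsigned int flags = EP_ENABLED | EP_RESOURCE_ALLOCATED | (1u << 3);

		assert(!(restart_old(flags) & EP_RESOURCE_ALLOCATED));
		assert(restart_new(flags) == (EP_ENABLED | EP_RESOURCE_ALLOCATED));
		return 0;
	}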
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index bffbc1dc651f..f855f1fc8e5e 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -46,6 +46,8 @@
#define FUNCTIONFS_MAGIC 0xa647361 /* Chosen by a honest dice roll ;) */
+#define DMABUF_ENQUEUE_TIMEOUT_MS 5000
+
MODULE_IMPORT_NS(DMA_BUF);
/* Reference counter handling */
@@ -1578,10 +1580,13 @@ static int ffs_dmabuf_transfer(struct file *file,
struct ffs_dmabuf_priv *priv;
struct ffs_dma_fence *fence;
struct usb_request *usb_req;
+ enum dma_resv_usage resv_dir;
struct dma_buf *dmabuf;
+ unsigned long timeout;
struct ffs_ep *ep;
bool cookie;
u32 seqno;
+ long retl;
int ret;
if (req->flags & ~USB_FFS_DMABUF_TRANSFER_MASK)
@@ -1615,17 +1620,14 @@ static int ffs_dmabuf_transfer(struct file *file,
goto err_attachment_put;
/* Make sure we don't have writers */
- if (!dma_resv_test_signaled(dmabuf->resv, DMA_RESV_USAGE_WRITE)) {
- pr_vdebug("FFS WRITE fence is not signaled\n");
- ret = -EBUSY;
- goto err_resv_unlock;
- }
-
- /* If we're writing to the DMABUF, make sure we don't have readers */
- if (epfile->in &&
- !dma_resv_test_signaled(dmabuf->resv, DMA_RESV_USAGE_READ)) {
- pr_vdebug("FFS READ fence is not signaled\n");
- ret = -EBUSY;
+ timeout = nonblock ? 0 : msecs_to_jiffies(DMABUF_ENQUEUE_TIMEOUT_MS);
+ retl = dma_resv_wait_timeout(dmabuf->resv,
+ dma_resv_usage_rw(epfile->in),
+ true, timeout);
+ if (retl == 0)
+ retl = -EBUSY;
+ if (retl < 0) {
+ ret = (int)retl;
goto err_resv_unlock;
}
@@ -1665,8 +1667,9 @@ static int ffs_dmabuf_transfer(struct file *file,
dma_fence_init(&fence->base, &ffs_dmabuf_fence_ops,
&priv->lock, priv->context, seqno);
- dma_resv_add_fence(dmabuf->resv, &fence->base,
- dma_resv_usage_rw(epfile->in));
+ resv_dir = epfile->in ? DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_READ;
+
+ dma_resv_add_fence(dmabuf->resv, &fence->base, resv_dir);
dma_resv_unlock(dmabuf->resv);
/* Now that the dma_fence is in place, queue the transfer. */
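Editor's note: instead of failing with -EBUSY as soon as any relevant fence is unsignaled, the transfer path above now waits on the reservation object: dma_resv_wait_timeout() gets a zero timeout for non-blocking I/O and DMABUF_ENQUEUE_TIMEOUT_MS otherwise, and its three outcomes (positive = signaled, zero = timed out, negative = error) are folded into a single errno. The new fence is then added with an explicit usage (WRITE when the endpoint writes into the buffer, READ otherwise). A minimal standalone sketch of just the return-value folding, with wait_signaled() standing in for the dma_resv call:

	/* wait_signaled() mimics the contract: >0 signaled, 0 timeout, <0 error. */
	#include <assert.h>

	static long wait_signaled(long simulated_result) { return simulated_result; }

	static int enqueue(int nonblock, long simulated_result)
	{
		unsigned long timeout_ms = nonblock ? 0 : 5000;	/* illustrative */
		long retl;

		(void)timeout_ms;
		retl = wait_signaled(simulated_result);
		if (retl == 0)
			retl = -16;		/* treat a timeout as -EBUSY */
		if (retl < 0)
			return (int)retl;

		return 0;			/* fences signaled; safe to queue */
	}

	int main(void)
	{
		assert(enqueue(1, 0) == -16);	/* nonblocking and still busy */
		assert(enqueue(0, 1) == 0);	/* wait succeeded             */
		return 0;
	}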
diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
index 28f4e6552e84..0acc32ed9960 100644
--- a/drivers/usb/gadget/function/f_ncm.c
+++ b/drivers/usb/gadget/function/f_ncm.c
@@ -878,7 +878,7 @@ static int ncm_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
if (alt > 1)
goto fail;
- if (ncm->port.in_ep->enabled) {
+ if (ncm->netdev) {
DBG(cdev, "reset ncm\n");
ncm->netdev = NULL;
gether_disconnect(&ncm->port);
@@ -1367,7 +1367,7 @@ static void ncm_disable(struct usb_function *f)
DBG(cdev, "ncm deactivated\n");
- if (ncm->port.in_ep->enabled) {
+ if (ncm->netdev) {
ncm->netdev = NULL;
gether_disconnect(&ncm->port);
}
diff --git a/drivers/usb/gadget/udc/fsl_udc_core.c b/drivers/usb/gadget/udc/fsl_udc_core.c
index e82d03224f94..3432ebfae978 100644
--- a/drivers/usb/gadget/udc/fsl_udc_core.c
+++ b/drivers/usb/gadget/udc/fsl_udc_core.c
@@ -868,7 +868,7 @@ fsl_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
{
struct fsl_ep *ep = container_of(_ep, struct fsl_ep, ep);
struct fsl_req *req = container_of(_req, struct fsl_req, req);
- struct fsl_udc *udc;
+ struct fsl_udc *udc = ep->udc;
unsigned long flags;
int ret;
@@ -878,7 +878,7 @@ fsl_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
dev_vdbg(&udc->gadget.dev, "%s, bad params\n", __func__);
return -EINVAL;
}
- if (unlikely(!_ep || !ep->ep.desc)) {
+ if (unlikely(!ep->ep.desc)) {
dev_vdbg(&udc->gadget.dev, "%s, bad ep\n", __func__);
return -EINVAL;
}
@@ -887,7 +887,6 @@ fsl_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
return -EMSGSIZE;
}
- udc = ep->udc;
if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
return -ESHUTDOWN;
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 52278afea94b..575f0fd9c9f1 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -3133,7 +3133,7 @@ static int xhci_handle_events(struct xhci_hcd *xhci, struct xhci_interrupter *ir
irqreturn_t xhci_irq(struct usb_hcd *hcd)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
- irqreturn_t ret = IRQ_NONE;
+ irqreturn_t ret = IRQ_HANDLED;
u32 status;
spin_lock(&xhci->lock);
@@ -3141,12 +3141,13 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
status = readl(&xhci->op_regs->status);
if (status == ~(u32)0) {
xhci_hc_died(xhci);
- ret = IRQ_HANDLED;
goto out;
}
- if (!(status & STS_EINT))
+ if (!(status & STS_EINT)) {
+ ret = IRQ_NONE;
goto out;
+ }
if (status & STS_HCE) {
xhci_warn(xhci, "WARNING: Host Controller Error\n");
@@ -3156,7 +3157,6 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
if (status & STS_FATAL) {
xhci_warn(xhci, "WARNING: Host System Error\n");
xhci_halt(xhci);
- ret = IRQ_HANDLED;
goto out;
}
@@ -3167,7 +3167,6 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
*/
status |= STS_EINT;
writel(status, &xhci->op_regs->status);
- ret = IRQ_HANDLED;
/* This is the handler of the primary interrupter */
xhci_handle_events(xhci, xhci->interrupters[0]);
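Editor's note: the handler's default return is inverted above. It now starts as IRQ_HANDLED, returns IRQ_NONE only in the one case where the status register shows no event interrupt, and the individual error/exit paths no longer need to set IRQ_HANDLED themselves. A minimal userspace sketch of the same default-handled structure; IRQ_NONE/IRQ_HANDLED are the kernel names, but the status bits here are made up:

	#include <stdio.h>

	enum irqreturn { IRQ_NONE, IRQ_HANDLED };

	#define STS_EINT	(1u << 3)	/* illustrative bit positions */
	#define STS_FATAL	(1u << 2)

	static enum irqreturn fake_irq(unsigned int status)
	{
		enum irqreturn ret = IRQ_HANDLED;	/* default: we handled it */

		if (status == ~0u)			/* controller died        */
			goto out;
		if (!(status & STS_EINT)) {		/* not our interrupt      */
			ret = IRQ_NONE;
			goto out;
		}
		if (status & STS_FATAL)			/* halt path, still handled */
			goto out;
		/* ... process events ... */
	out:
		return ret;
	}

	int main(void)
	{
		printf("%d %d\n", fake_irq(0), fake_irq(STS_EINT));
		return 0;
	}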
diff --git a/drivers/usb/host/xhci-trace.h b/drivers/usb/host/xhci-trace.h
index 1740000d54c2..5762564b9d73 100644
--- a/drivers/usb/host/xhci-trace.h
+++ b/drivers/usb/host/xhci-trace.h
@@ -172,8 +172,7 @@ DECLARE_EVENT_CLASS(xhci_log_free_virt_dev,
__field(void *, vdev)
__field(unsigned long long, out_ctx)
__field(unsigned long long, in_ctx)
- __field(int, hcd_portnum)
- __field(int, hw_portnum)
+ __field(int, slot_id)
__field(u16, current_mel)
),
@@ -181,13 +180,12 @@ DECLARE_EVENT_CLASS(xhci_log_free_virt_dev,
__entry->vdev = vdev;
__entry->in_ctx = (unsigned long long) vdev->in_ctx->dma;
__entry->out_ctx = (unsigned long long) vdev->out_ctx->dma;
- __entry->hcd_portnum = (int) vdev->rhub_port->hcd_portnum;
- __entry->hw_portnum = (int) vdev->rhub_port->hw_portnum;
+ __entry->slot_id = (int) vdev->slot_id;
__entry->current_mel = (u16) vdev->current_mel;
),
- TP_printk("vdev %p ctx %llx | %llx hcd_portnum %d hw_portnum %d current_mel %d",
- __entry->vdev, __entry->in_ctx, __entry->out_ctx,
- __entry->hcd_portnum, __entry->hw_portnum, __entry->current_mel
+ TP_printk("vdev %p slot %d ctx %llx | %llx current_mel %d",
+ __entry->vdev, __entry->slot_id, __entry->in_ctx,
+ __entry->out_ctx, __entry->current_mel
)
);
diff --git a/drivers/usb/misc/onboard_usb_hub.c b/drivers/usb/misc/onboard_usb_hub.c
index c6101ed2d9d4..d8049275a023 100644
--- a/drivers/usb/misc/onboard_usb_hub.c
+++ b/drivers/usb/misc/onboard_usb_hub.c
@@ -78,7 +78,7 @@ static int onboard_hub_power_on(struct onboard_hub *hub)
err = regulator_bulk_enable(hub->pdata->num_supplies, hub->supplies);
if (err) {
dev_err(hub->dev, "failed to enable supplies: %pe\n", ERR_PTR(err));
- return err;
+ goto disable_clk;
}
fsleep(hub->pdata->reset_us);
@@ -87,6 +87,10 @@ static int onboard_hub_power_on(struct onboard_hub *hub)
hub->is_powered_on = true;
return 0;
+
+disable_clk:
+ clk_disable_unprepare(hub->clk);
+ return err;
}
static int onboard_hub_power_off(struct onboard_hub *hub)
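Editor's note: the regulator failure path in onboard_hub_power_on() now unwinds the clock that was already enabled earlier in the function instead of returning with it running, using the usual goto-unwind style. A minimal standalone sketch of that shape; the enable/disable helpers are stubs, not the clk or regulator APIs:

	/* Stubs standing in for clk_prepare_enable()/regulator_bulk_enable() etc. */
	static int enable_clock(void)		{ return 0; }
	static void disable_clock(void)		{ }
	static int enable_supplies(int ok)	{ return ok ? 0 : -1; }

	static int power_on(int supplies_ok)
	{
		int err;

		err = enable_clock();
		if (err)
			return err;

		err = enable_supplies(supplies_ok);
		if (err)
			goto disable_clk;	/* undo what already succeeded */

		return 0;

	disable_clk:
		disable_clock();
		return err;
	}

	int main(void)
	{
		return (power_on(1) == 0 && power_on(0) == -1) ? 0 : 1;
	}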
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 55a65d941ccb..8a5846d4adf6 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -255,6 +255,10 @@ static void option_instat_callback(struct urb *urb);
#define QUECTEL_PRODUCT_EM061K_LMS 0x0124
#define QUECTEL_PRODUCT_EC25 0x0125
#define QUECTEL_PRODUCT_EM060K_128 0x0128
+#define QUECTEL_PRODUCT_EM060K_129 0x0129
+#define QUECTEL_PRODUCT_EM060K_12a 0x012a
+#define QUECTEL_PRODUCT_EM060K_12b 0x012b
+#define QUECTEL_PRODUCT_EM060K_12c 0x012c
#define QUECTEL_PRODUCT_EG91 0x0191
#define QUECTEL_PRODUCT_EG95 0x0195
#define QUECTEL_PRODUCT_BG96 0x0296
@@ -1218,6 +1222,18 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_128, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_128, 0xff, 0x00, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_128, 0xff, 0xff, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_129, 0xff, 0xff, 0x30) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_129, 0xff, 0x00, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_129, 0xff, 0xff, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12a, 0xff, 0xff, 0x30) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12a, 0xff, 0x00, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12a, 0xff, 0xff, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12b, 0xff, 0xff, 0x30) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12b, 0xff, 0x00, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12b, 0xff, 0xff, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12c, 0xff, 0xff, 0x30) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12c, 0xff, 0x00, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12c, 0xff, 0xff, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LCN, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LCN, 0xff, 0x00, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LCN, 0xff, 0xff, 0x40) },
@@ -1360,6 +1376,12 @@ static const struct usb_device_id option_ids[] = {
.driver_info = NCTRL(2) | RSVD(3) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1083, 0xff), /* Telit FE990 (ECM) */
.driver_info = NCTRL(0) | RSVD(1) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a0, 0xff), /* Telit FN20C04 (rmnet) */
+ .driver_info = RSVD(0) | NCTRL(3) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a4, 0xff), /* Telit FN20C04 (rmnet) */
+ .driver_info = RSVD(0) | NCTRL(3) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a9, 0xff), /* Telit FN20C04 (rmnet) */
+ .driver_info = RSVD(0) | NCTRL(2) | RSVD(3) | RSVD(4) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
.driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
@@ -2052,6 +2074,10 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(3) },
{ USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, 0x9803, 0xff),
.driver_info = RSVD(4) },
+ { USB_DEVICE(LONGCHEER_VENDOR_ID, 0x9b05), /* Longsung U8300 */
+ .driver_info = RSVD(4) | RSVD(5) },
+ { USB_DEVICE(LONGCHEER_VENDOR_ID, 0x9b3c), /* Longsung U9300 */
+ .driver_info = RSVD(0) | RSVD(4) },
{ USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) },
{ USB_DEVICE(LONGCHEER_VENDOR_ID, IBALL_3_5G_CONNECT) },
{ USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) },
@@ -2272,15 +2298,29 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0xff, 0x30) }, /* Fibocom FG150 Diag */
{ USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0, 0) }, /* Fibocom FG150 AT */
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0111, 0xff) }, /* Fibocom FM160 (MBIM mode) */
+ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0115, 0xff), /* Fibocom FM135 (laptop MBIM) */
+ .driver_info = RSVD(5) },
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) }, /* Fibocom NL668-AM/NL652-EU (laptop MBIM) */
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a2, 0xff) }, /* Fibocom FM101-GL (laptop MBIM) */
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a3, 0xff) }, /* Fibocom FM101-GL (laptop MBIM) */
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a4, 0xff), /* Fibocom FM101-GL (laptop MBIM) */
.driver_info = RSVD(4) },
+ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0a04, 0xff) }, /* Fibocom FM650-CN (ECM mode) */
+ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0a05, 0xff) }, /* Fibocom FM650-CN (NCM mode) */
+ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0a06, 0xff) }, /* Fibocom FM650-CN (RNDIS mode) */
+ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0a07, 0xff) }, /* Fibocom FM650-CN (MBIM mode) */
{ USB_DEVICE_INTERFACE_CLASS(0x2df3, 0x9d03, 0xff) }, /* LongSung M5710 */
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) }, /* GosunCn GM500 RNDIS */
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1405, 0xff) }, /* GosunCn GM500 MBIM */
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1406, 0xff) }, /* GosunCn GM500 ECM/NCM */
+ { USB_DEVICE(0x33f8, 0x0104), /* Rolling RW101-GL (laptop RMNET) */
+ .driver_info = RSVD(4) | RSVD(5) },
+ { USB_DEVICE_INTERFACE_CLASS(0x33f8, 0x01a2, 0xff) }, /* Rolling RW101-GL (laptop MBIM) */
+ { USB_DEVICE_INTERFACE_CLASS(0x33f8, 0x01a3, 0xff) }, /* Rolling RW101-GL (laptop MBIM) */
+ { USB_DEVICE_INTERFACE_CLASS(0x33f8, 0x01a4, 0xff), /* Rolling RW101-GL (laptop MBIM) */
+ .driver_info = RSVD(4) },
+ { USB_DEVICE_INTERFACE_CLASS(0x33f8, 0x0115, 0xff), /* Rolling RW135-GL (laptop MBIM) */
+ .driver_info = RSVD(5) },
{ USB_DEVICE_AND_INTERFACE_INFO(OPPO_VENDOR_ID, OPPO_PRODUCT_R11, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x40) },
diff --git a/drivers/usb/typec/mux/it5205.c b/drivers/usb/typec/mux/it5205.c
index 5535932e42cd..4357cc67a867 100644
--- a/drivers/usb/typec/mux/it5205.c
+++ b/drivers/usb/typec/mux/it5205.c
@@ -22,7 +22,7 @@
#include <linux/usb/typec_mux.h>
#define IT5205_REG_CHIP_ID(x) (0x4 + (x))
-#define IT5205FN_CHIP_ID 0x35323035 /* "5205" */
+#define IT5205FN_CHIP_ID 0x35303235 /* "5025" -> "5205" */
/* MUX power down register */
#define IT5205_REG_MUXPDR 0x10
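Editor's note: the corrected constant is what four consecutive chip-ID registers holding ASCII '5' '2' '0' '5' produce when register N lands in byte N of the u32, i.e. the first register ends up in the least significant byte. A minimal check of that assembly; the byte-order assumption is inferred from the new value, not stated in the patch:

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* IT5205_REG_CHIP_ID(0..3) assumed to read back '5', '2', '0', '5' */
		const uint8_t reg[4] = { '5', '2', '0', '5' };
		uint32_t id = 0;

		for (int i = 0; i < 4; i++)
			id |= (uint32_t)reg[i] << (8 * i);	/* reg 0 -> lowest byte */

		assert(id == 0x35303235);	/* the new IT5205FN_CHIP_ID value */
		printf("chip id 0x%08x\n", id);
		return 0;
	}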
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index c26fb70c3ec6..ab6ed6111ed0 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -6855,14 +6855,14 @@ static int tcpm_pd_set(struct typec_port *p, struct usb_power_delivery *pd)
if (data->sink_desc.pdo[0]) {
for (i = 0; i < PDO_MAX_OBJECTS && data->sink_desc.pdo[i]; i++)
port->snk_pdo[i] = data->sink_desc.pdo[i];
- port->nr_snk_pdo = i + 1;
+ port->nr_snk_pdo = i;
port->operating_snk_mw = data->operating_snk_mw;
}
if (data->source_desc.pdo[0]) {
for (i = 0; i < PDO_MAX_OBJECTS && data->source_desc.pdo[i]; i++)
port->src_pdo[i] = data->source_desc.pdo[i];
- port->nr_src_pdo = i + 1;
+ port->nr_src_pdo = i;
}
switch (port->state) {
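Editor's note: in tcpm_pd_set() the copy loop leaves i equal to the number of PDOs actually copied (it stops at PDO_MAX_OBJECTS or at the first zero entry), so the old "i + 1" overstated the count by one; the fix stores i directly. A standalone illustration of the counter semantics, with made-up PDO values:

	#include <assert.h>
	#include <stdio.h>

	#define PDO_MAX_OBJECTS 7

	int main(void)
	{
		unsigned int src[PDO_MAX_OBJECTS] = { 100, 200, 300, 0 };	/* 3 valid PDOs */
		unsigned int dst[PDO_MAX_OBJECTS];
		unsigned int i;

		for (i = 0; i < PDO_MAX_OBJECTS && src[i]; i++)
			dst[i] = src[i];

		/* i already counts the copied entries: 3, not 4 */
		assert(i == 3);
		printf("copied %u PDOs (last = %u)\n", i, dst[i - 1]);
		return 0;
	}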
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index 31d8a46ae5e7..bd6ae92aa39e 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -1736,11 +1736,13 @@ static int ucsi_init(struct ucsi *ucsi)
ucsi->connector = connector;
ucsi->ntfy = ntfy;
+ mutex_lock(&ucsi->ppm_lock);
ret = ucsi->ops->read(ucsi, UCSI_CCI, &cci, sizeof(cci));
+ mutex_unlock(&ucsi->ppm_lock);
if (ret)
return ret;
- if (UCSI_CCI_CONNECTOR(READ_ONCE(cci)))
- ucsi_connector_change(ucsi, cci);
+ if (UCSI_CCI_CONNECTOR(cci))
+ ucsi_connector_change(ucsi, UCSI_CCI_CONNECTOR(cci));
return 0;