Diffstat (limited to 'drivers')
-rw-r--r-- drivers/acpi/acpi_video.c | 27
-rw-r--r-- drivers/acpi/acpi_watchdog.c | 59
-rw-r--r-- drivers/acpi/button.c | 24
-rw-r--r-- drivers/acpi/scan.c | 2
-rw-r--r-- drivers/acpi/sleep.c | 13
-rw-r--r-- drivers/atm/iphase.c | 4
-rw-r--r-- drivers/base/dma-coherent.c | 5
-rw-r--r-- drivers/base/dma-mapping.c | 6
-rw-r--r-- drivers/block/loop.c | 64
-rw-r--r-- drivers/block/loop.h | 1
-rw-r--r-- drivers/block/rbd.c | 101
-rw-r--r-- drivers/block/swim.c | 49
-rw-r--r-- drivers/block/swim3.c | 6
-rw-r--r-- drivers/cdrom/cdrom.c | 2
-rw-r--r-- drivers/char/random.c | 172
-rw-r--r-- drivers/clocksource/Kconfig | 8
-rw-r--r-- drivers/clocksource/Makefile | 1
-rw-r--r-- drivers/clocksource/timer-imx-tpm.c | 45
-rw-r--r-- drivers/clocksource/timer-npcm7xx.c | 215
-rw-r--r-- drivers/cpufreq/Kconfig.arm | 10
-rw-r--r-- drivers/cpufreq/brcmstb-avs-cpufreq.c | 323
-rw-r--r-- drivers/dax/device.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c | 7
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 16
-rw-r--r-- drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h | 4
-rw-r--r-- drivers/gpu/drm/drm_dp_dual_mode_helper.c | 39
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_fb.c | 73
-rw-r--r-- drivers/gpu/drm/i915/gvt/cmd_parser.c | 1
-rw-r--r-- drivers/gpu/drm/i915/gvt/display.c | 10
-rw-r--r-- drivers/gpu/drm/i915/gvt/dmabuf.c | 1
-rw-r--r-- drivers/gpu/drm/i915/gvt/fb_decoder.c | 27
-rw-r--r-- drivers/gpu/drm/i915/gvt/gtt.c | 52
-rw-r--r-- drivers/gpu/drm/i915/gvt/gtt.h | 2
-rw-r--r-- drivers/gpu/drm/i915/gvt/handlers.c | 1
-rw-r--r-- drivers/gpu/drm/i915/gvt/kvmgt.c | 2
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.c | 27
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 2
-rw-r--r-- drivers/gpu/drm/i915/i915_pmu.c | 37
-rw-r--r-- drivers/gpu/drm/i915/intel_audio.c | 2
-rw-r--r-- drivers/gpu/drm/i915/intel_bios.c | 13
-rw-r--r-- drivers/gpu/drm/i915/intel_lrc.c | 9
-rw-r--r-- drivers/gpu/drm/vc4/vc4_bo.c | 2
-rw-r--r-- drivers/gpu/drm/vc4/vc4_validate_shaders.c | 1
-rw-r--r-- drivers/hid/hid-ids.h | 3
-rw-r--r-- drivers/hid/hid-input.c | 24
-rw-r--r-- drivers/hid/hidraw.c | 5
-rw-r--r-- drivers/hid/i2c-hid/i2c-hid.c | 13
-rw-r--r-- drivers/hid/wacom_wac.c | 76
-rw-r--r-- drivers/isdn/mISDN/dsp_hwec.c | 8
-rw-r--r-- drivers/isdn/mISDN/l1oip_core.c | 14
-rw-r--r-- drivers/md/md.c | 6
-rw-r--r-- drivers/md/raid1.c | 25
-rw-r--r-- drivers/message/fusion/mptsas.c | 1
-rw-r--r-- drivers/mmc/host/renesas_sdhi_internal_dmac.c | 39
-rw-r--r-- drivers/mmc/host/sdhci-pci-core.c | 25
-rw-r--r-- drivers/net/bonding/bond_main.c | 3
-rw-r--r-- drivers/net/dsa/mv88e6xxx/hwtstamp.c | 12
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-common.h | 8
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c | 16
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-main.c | 1
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-mdio.c | 24
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-pci.c | 2
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c | 196
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe.h | 9
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 49
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h | 2
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hnae.h | 2
-rw-r--r-- drivers/net/ethernet/ibm/ibmvnic.c | 89
-rw-r--r-- drivers/net/ethernet/ibm/ibmvnic.h | 1
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_adminq_cmd.h | 2
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_common.c | 22
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_hw_autogen.h | 2
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_main.c | 4
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_sched.c | 4
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_main.c | 17
-rw-r--r-- drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 2
-rw-r--r-- drivers/net/ethernet/marvell/mvpp2.c | 14
-rw-r--r-- drivers/net/ethernet/netronome/nfp/flower/cmsg.c | 44
-rw-r--r-- drivers/net/ethernet/netronome/nfp/flower/cmsg.h | 2
-rw-r--r-- drivers/net/ethernet/netronome/nfp/flower/main.c | 6
-rw-r--r-- drivers/net/ethernet/netronome/nfp/flower/main.h | 8
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c | 5
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c | 3
-rw-r--r-- drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c | 11
-rw-r--r-- drivers/net/ethernet/sfc/ef10.c | 85
-rw-r--r-- drivers/net/ethernet/sfc/efx.c | 143
-rw-r--r-- drivers/net/ethernet/sfc/efx.h | 21
-rw-r--r-- drivers/net/ethernet/sfc/farch.c | 41
-rw-r--r-- drivers/net/ethernet/sfc/net_driver.h | 61
-rw-r--r-- drivers/net/ethernet/sfc/rx.c | 122
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac4.h | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c | 7
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 7
-rw-r--r-- drivers/net/ethernet/ti/cpsw.c | 2
-rw-r--r-- drivers/net/macsec.c | 5
-rw-r--r-- drivers/net/phy/marvell.c | 9
-rw-r--r-- drivers/net/phy/microchip.c | 178
-rw-r--r-- drivers/net/ppp/pppoe.c | 4
-rw-r--r-- drivers/net/team/team.c | 38
-rw-r--r-- drivers/net/tun.c | 7
-rw-r--r-- drivers/net/usb/qmi_wwan.c | 1
-rw-r--r-- drivers/net/virtio_net.c | 79
-rw-r--r-- drivers/net/vmxnet3/vmxnet3_drv.c | 17
-rw-r--r-- drivers/net/vmxnet3/vmxnet3_int.h | 4
-rw-r--r-- drivers/nvdimm/Kconfig | 3
-rw-r--r-- drivers/nvdimm/dimm_devs.c | 22
-rw-r--r-- drivers/nvdimm/of_pmem.c | 2
-rw-r--r-- drivers/pci/pci-driver.c | 5
-rw-r--r-- drivers/rapidio/devices/rio_mport_cdev.c | 19
-rw-r--r-- drivers/s390/block/dasd_alias.c | 13
-rw-r--r-- drivers/s390/block/dasd_diag.c | 1
-rw-r--r-- drivers/s390/char/sclp_early_core.c | 2
-rw-r--r-- drivers/s390/cio/chsc.c | 14
-rw-r--r-- drivers/s390/cio/vfio_ccw_fsm.c | 19
-rw-r--r-- drivers/s390/net/qeth_core.h | 2
-rw-r--r-- drivers/s390/net/qeth_core_main.c | 158
-rw-r--r-- drivers/s390/net/qeth_core_mpc.h | 12
-rw-r--r-- drivers/s390/net/qeth_l2_main.c | 60
-rw-r--r-- drivers/s390/net/smsgiucv.c | 2
-rw-r--r-- drivers/scsi/fnic/fnic_trace.c | 2
-rw-r--r-- drivers/scsi/megaraid/megaraid_sas_fusion.c | 6
-rw-r--r-- drivers/scsi/scsi_debug.c | 33
-rw-r--r-- drivers/scsi/scsi_transport_iscsi.c | 29
-rw-r--r-- drivers/scsi/sd.c | 2
-rw-r--r-- drivers/scsi/sd_zbc.c | 140
-rw-r--r-- drivers/scsi/ufs/ufshcd.c | 40
-rw-r--r-- drivers/target/target_core_pscsi.c | 2
-rw-r--r-- drivers/watchdog/aspeed_wdt.c | 7
-rw-r--r-- drivers/watchdog/renesas_wdt.c | 6
-rw-r--r-- drivers/watchdog/sch311x_wdt.c | 2
-rw-r--r-- drivers/watchdog/w83977f_wdt.c | 2
-rw-r--r-- drivers/watchdog/wafer5823wdt.c | 2
-rw-r--r-- drivers/xen/xen-pciback/conf_space_quirks.c | 2
-rw-r--r-- drivers/xen/xen-pciback/pci_stub.c | 8
-rw-r--r-- drivers/xen/xenbus/xenbus_dev_frontend.c | 3
135 files changed, 2523 insertions(+), 1196 deletions(-)
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index 76fb96966f7b..2f2e737be0f8 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -2123,6 +2123,25 @@ static int __init intel_opregion_present(void)
return opregion;
}
+static bool dmi_is_desktop(void)
+{
+ const char *chassis_type;
+
+ chassis_type = dmi_get_system_info(DMI_CHASSIS_TYPE);
+ if (!chassis_type)
+ return false;
+
+ if (!strcmp(chassis_type, "3") || /* 3: Desktop */
+ !strcmp(chassis_type, "4") || /* 4: Low Profile Desktop */
+ !strcmp(chassis_type, "5") || /* 5: Pizza Box */
+ !strcmp(chassis_type, "6") || /* 6: Mini Tower */
+ !strcmp(chassis_type, "7") || /* 7: Tower */
+ !strcmp(chassis_type, "11")) /* 11: Main Server Chassis */
+ return true;
+
+ return false;
+}
+
int acpi_video_register(void)
{
int ret = 0;
@@ -2143,8 +2162,12 @@ int acpi_video_register(void)
* win8 ready (where we also prefer the native backlight driver, so
* normally the acpi_video code should not register there anyways).
*/
- if (only_lcd == -1)
- only_lcd = acpi_osi_is_win8();
+ if (only_lcd == -1) {
+ if (dmi_is_desktop() && acpi_osi_is_win8())
+ only_lcd = true;
+ else
+ only_lcd = false;
+ }
dmi_check_system(video_dmi_table);
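The chassis types tested in dmi_is_desktop() come from the SMBIOS Type 3 (chassis) record, which the kernel also exports through sysfs. As a hedged illustration only (a standalone userspace sketch, not part of this patch), the same heuristic can be reproduced like this:

/* Hypothetical userspace mirror of dmi_is_desktop(); reads the SMBIOS
 * chassis type the kernel exposes at /sys/class/dmi/id/chassis_type. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[8] = "";
	FILE *f = fopen("/sys/class/dmi/id/chassis_type", "r");

	if (!f)
		return 1;
	if (!fgets(buf, sizeof(buf), f)) {
		fclose(f);
		return 1;
	}
	fclose(f);
	buf[strcspn(buf, "\n")] = '\0';

	/* 3=Desktop 4=Low Profile 5=Pizza Box 6=Mini Tower 7=Tower 11=Server */
	if (!strcmp(buf, "3") || !strcmp(buf, "4") || !strcmp(buf, "5") ||
	    !strcmp(buf, "6") || !strcmp(buf, "7") || !strcmp(buf, "11"))
		puts("desktop-like chassis");
	else
		puts("mobile/other chassis");
	return 0;
}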
diff --git a/drivers/acpi/acpi_watchdog.c b/drivers/acpi/acpi_watchdog.c
index ebb626ffb5fa..4bde16fb97d8 100644
--- a/drivers/acpi/acpi_watchdog.c
+++ b/drivers/acpi/acpi_watchdog.c
@@ -12,23 +12,64 @@
#define pr_fmt(fmt) "ACPI: watchdog: " fmt
#include <linux/acpi.h>
+#include <linux/dmi.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include "internal.h"
+static const struct dmi_system_id acpi_watchdog_skip[] = {
+ {
+ /*
+ * On Lenovo Z50-70 there are two issues with the WDAT
+ * table. First some of the instructions use RTC SRAM
+ * to store persistent information. This does not work well
+ * with the Linux RTC driver. Second, and more importantly,
+ * the instructions do not actually reset the system.
+ *
+ * On this particular system iTCO_wdt seems to work just
+ * fine so we prefer that over WDAT for now.
+ *
+ * See also https://bugzilla.kernel.org/show_bug.cgi?id=199033.
+ */
+ .ident = "Lenovo Z50-70",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "20354"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Z50-70"),
+ },
+ },
+ {}
+};
+
+static const struct acpi_table_wdat *acpi_watchdog_get_wdat(void)
+{
+ const struct acpi_table_wdat *wdat = NULL;
+ acpi_status status;
+
+ if (acpi_disabled)
+ return NULL;
+
+ if (dmi_check_system(acpi_watchdog_skip))
+ return NULL;
+
+ status = acpi_get_table(ACPI_SIG_WDAT, 0,
+ (struct acpi_table_header **)&wdat);
+ if (ACPI_FAILURE(status)) {
+ /* It is fine if there is no WDAT */
+ return NULL;
+ }
+
+ return wdat;
+}
+
/**
* Returns true if this system should prefer ACPI based watchdog instead of
* the native one (which are typically the same hardware).
*/
bool acpi_has_watchdog(void)
{
- struct acpi_table_header hdr;
-
- if (acpi_disabled)
- return false;
-
- return ACPI_SUCCESS(acpi_get_table_header(ACPI_SIG_WDAT, 0, &hdr));
+ return !!acpi_watchdog_get_wdat();
}
EXPORT_SYMBOL_GPL(acpi_has_watchdog);
@@ -41,12 +82,10 @@ void __init acpi_watchdog_init(void)
struct platform_device *pdev;
struct resource *resources;
size_t nresources = 0;
- acpi_status status;
int i;
- status = acpi_get_table(ACPI_SIG_WDAT, 0,
- (struct acpi_table_header **)&wdat);
- if (ACPI_FAILURE(status)) {
+ wdat = acpi_watchdog_get_wdat();
+ if (!wdat) {
/* It is fine if there is no WDAT */
return;
}
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index e1eee7a60fad..f1cc4f9d31cd 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -635,4 +635,26 @@ module_param_call(lid_init_state,
NULL, 0644);
MODULE_PARM_DESC(lid_init_state, "Behavior for reporting LID initial state");
-module_acpi_driver(acpi_button_driver);
+static int acpi_button_register_driver(struct acpi_driver *driver)
+{
+ /*
+ * Modules such as nouveau.ko and i915.ko have a link time dependency
+ * on acpi_lid_open(), and would therefore not be loadable on ACPI
+ * capable kernels booted in non-ACPI mode if the return value of
+ * acpi_bus_register_driver() is returned from here with ACPI disabled
+ * when this driver is built as a module.
+ */
+ if (acpi_disabled)
+ return 0;
+
+ return acpi_bus_register_driver(driver);
+}
+
+static void acpi_button_unregister_driver(struct acpi_driver *driver)
+{
+ if (!acpi_disabled)
+ acpi_bus_unregister_driver(driver);
+}
+
+module_driver(acpi_button_driver, acpi_button_register_driver,
+ acpi_button_unregister_driver);
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index cc234e6a6297..970dd87d347c 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -2166,10 +2166,10 @@ int __init acpi_scan_init(void)
acpi_cmos_rtc_init();
acpi_container_init();
acpi_memory_hotplug_init();
+ acpi_watchdog_init();
acpi_pnp_init();
acpi_int340x_thermal_init();
acpi_amba_init();
- acpi_watchdog_init();
acpi_init_lpit();
acpi_scan_add_handler(&generic_device_handler);
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 99a1a650326d..974e58457697 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -364,6 +364,19 @@ static const struct dmi_system_id acpisleep_dmi_table[] __initconst = {
DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9360"),
},
},
+ /*
+ * ThinkPad X1 Tablet(2016) cannot do suspend-to-idle using
+ * the Low Power S0 Idle firmware interface (see
+ * https://bugzilla.kernel.org/show_bug.cgi?id=199057).
+ */
+ {
+ .callback = init_no_lps0,
+ .ident = "ThinkPad X1 Tablet(2016)",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "20GGA00L00"),
+ },
+ },
{},
};
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index 44abb8a0a5e5..be076606d30e 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -671,7 +671,7 @@ static void ia_tx_poll (IADEV *iadev) {
if ((vcc->pop) && (skb1->len != 0))
{
vcc->pop(vcc, skb1);
- IF_EVENT(printk("Tansmit Done - skb 0x%lx return\n",
+ IF_EVENT(printk("Transmit Done - skb 0x%lx return\n",
(long)skb1);)
}
else
@@ -1665,7 +1665,7 @@ static void tx_intr(struct atm_dev *dev)
status = readl(iadev->seg_reg+SEG_INTR_STATUS_REG);
if (status & TRANSMIT_DONE){
- IF_EVENT(printk("Tansmit Done Intr logic run\n");)
+ IF_EVENT(printk("Transmit Done Intr logic run\n");)
spin_lock_irqsave(&iadev->tx_lock, flags);
ia_tx_poll(iadev);
spin_unlock_irqrestore(&iadev->tx_lock, flags);
diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c
index 1e6396bb807b..597d40893862 100644
--- a/drivers/base/dma-coherent.c
+++ b/drivers/base/dma-coherent.c
@@ -312,8 +312,9 @@ static int __dma_mmap_from_coherent(struct dma_coherent_mem *mem,
* This checks whether the memory was allocated from the per-device
* coherent memory pool and if so, maps that memory to the provided vma.
*
- * Returns 1 if we correctly mapped the memory, or 0 if the caller should
- * proceed with mapping memory from generic pools.
+ * Returns 1 if @vaddr belongs to the device coherent pool and the caller
+ * should return @ret, or 0 if they should proceed with mapping memory from
+ * generic areas.
*/
int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
void *vaddr, size_t size, int *ret)
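The reworded comment pins down a calling convention: a non-zero return means *ret already holds the final status, while zero means the caller owns the fallback path. A minimal caller sketch under that convention (example_dma_mmap is hypothetical, not part of this patch):

/* Hypothetical mmap handler showing the dma_mmap_from_dev_coherent contract. */
static int example_dma_mmap(struct device *dev, struct vm_area_struct *vma,
			    void *cpu_addr, size_t size)
{
	int ret = -ENXIO;

	/* Non-zero return: @cpu_addr belongs to the per-device pool and
	 * @ret is final, whether success or failure. */
	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	/* Zero return: proceed with the generic mapping path. */
	return remap_pfn_range(vma, vma->vm_start,
			       page_to_pfn(virt_to_page(cpu_addr)),
			       vma_pages(vma) << PAGE_SHIFT,
			       vma->vm_page_prot);
}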
diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c
index 3b118353ea17..d82566d6e237 100644
--- a/drivers/base/dma-mapping.c
+++ b/drivers/base/dma-mapping.c
@@ -226,7 +226,6 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
#ifndef CONFIG_ARCH_NO_COHERENT_DMA_MMAP
unsigned long user_count = vma_pages(vma);
unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
- unsigned long pfn = page_to_pfn(virt_to_page(cpu_addr));
unsigned long off = vma->vm_pgoff;
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
@@ -234,12 +233,11 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
return ret;
- if (off < count && user_count <= (count - off)) {
+ if (off < count && user_count <= (count - off))
ret = remap_pfn_range(vma, vma->vm_start,
- pfn + off,
+ page_to_pfn(virt_to_page(cpu_addr)) + off,
user_count << PAGE_SHIFT,
vma->vm_page_prot);
- }
#endif /* !CONFIG_ARCH_NO_COHERENT_DMA_MMAP */
return ret;
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index c9d04497a415..5d4e31655d96 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -451,25 +451,47 @@ static int lo_req_flush(struct loop_device *lo, struct request *rq)
static void lo_complete_rq(struct request *rq)
{
struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
+ blk_status_t ret = BLK_STS_OK;
- if (unlikely(req_op(cmd->rq) == REQ_OP_READ && cmd->use_aio &&
- cmd->ret >= 0 && cmd->ret < blk_rq_bytes(cmd->rq))) {
- struct bio *bio = cmd->rq->bio;
-
- bio_advance(bio, cmd->ret);
- zero_fill_bio(bio);
+ if (!cmd->use_aio || cmd->ret < 0 || cmd->ret == blk_rq_bytes(rq) ||
+ req_op(rq) != REQ_OP_READ) {
+ if (cmd->ret < 0)
+ ret = BLK_STS_IOERR;
+ goto end_io;
}
- blk_mq_end_request(rq, cmd->ret < 0 ? BLK_STS_IOERR : BLK_STS_OK);
+ /*
+ * Short READ - if we got some data, advance our request and
+ * retry it. If we got no data, end the rest with EIO.
+ */
+ if (cmd->ret) {
+ blk_update_request(rq, BLK_STS_OK, cmd->ret);
+ cmd->ret = 0;
+ blk_mq_requeue_request(rq, true);
+ } else {
+ if (cmd->use_aio) {
+ struct bio *bio = rq->bio;
+
+ while (bio) {
+ zero_fill_bio(bio);
+ bio = bio->bi_next;
+ }
+ }
+ ret = BLK_STS_IOERR;
+end_io:
+ blk_mq_end_request(rq, ret);
+ }
}
static void lo_rw_aio_do_completion(struct loop_cmd *cmd)
{
+ struct request *rq = blk_mq_rq_from_pdu(cmd);
+
if (!atomic_dec_and_test(&cmd->ref))
return;
kfree(cmd->bvec);
cmd->bvec = NULL;
- blk_mq_complete_request(cmd->rq);
+ blk_mq_complete_request(rq);
}
static void lo_rw_aio_complete(struct kiocb *iocb, long ret, long ret2)
@@ -487,7 +509,7 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
{
struct iov_iter iter;
struct bio_vec *bvec;
- struct request *rq = cmd->rq;
+ struct request *rq = blk_mq_rq_from_pdu(cmd);
struct bio *bio = rq->bio;
struct file *file = lo->lo_backing_file;
unsigned int offset;
@@ -1702,15 +1724,16 @@ EXPORT_SYMBOL(loop_unregister_transfer);
static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
- struct loop_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
- struct loop_device *lo = cmd->rq->q->queuedata;
+ struct request *rq = bd->rq;
+ struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
+ struct loop_device *lo = rq->q->queuedata;
- blk_mq_start_request(bd->rq);
+ blk_mq_start_request(rq);
if (lo->lo_state != Lo_bound)
return BLK_STS_IOERR;
- switch (req_op(cmd->rq)) {
+ switch (req_op(rq)) {
case REQ_OP_FLUSH:
case REQ_OP_DISCARD:
case REQ_OP_WRITE_ZEROES:
@@ -1723,8 +1746,8 @@ static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx,
/* always use the first bio's css */
#ifdef CONFIG_BLK_CGROUP
- if (cmd->use_aio && cmd->rq->bio && cmd->rq->bio->bi_css) {
- cmd->css = cmd->rq->bio->bi_css;
+ if (cmd->use_aio && rq->bio && rq->bio->bi_css) {
+ cmd->css = rq->bio->bi_css;
css_get(cmd->css);
} else
#endif
@@ -1736,8 +1759,9 @@ static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx,
static void loop_handle_cmd(struct loop_cmd *cmd)
{
- const bool write = op_is_write(req_op(cmd->rq));
- struct loop_device *lo = cmd->rq->q->queuedata;
+ struct request *rq = blk_mq_rq_from_pdu(cmd);
+ const bool write = op_is_write(req_op(rq));
+ struct loop_device *lo = rq->q->queuedata;
int ret = 0;
if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) {
@@ -1745,12 +1769,12 @@ static void loop_handle_cmd(struct loop_cmd *cmd)
goto failed;
}
- ret = do_req_filebacked(lo, cmd->rq);
+ ret = do_req_filebacked(lo, rq);
failed:
/* complete non-aio request */
if (!cmd->use_aio || ret) {
cmd->ret = ret ? -EIO : 0;
- blk_mq_complete_request(cmd->rq);
+ blk_mq_complete_request(rq);
}
}
@@ -1767,9 +1791,7 @@ static int loop_init_request(struct blk_mq_tag_set *set, struct request *rq,
{
struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
- cmd->rq = rq;
kthread_init_work(&cmd->work, loop_queue_work);
-
return 0;
}
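Dropping the cached cmd->rq pointer is safe because blk-mq allocates a request's driver payload (PDU) immediately behind the request itself, so either pointer can be recomputed from the other in constant time. The two helpers used throughout this patch are essentially (from include/linux/blk-mq.h):

static inline void *blk_mq_rq_to_pdu(struct request *rq)
{
	return rq + 1;
}

static inline struct request *blk_mq_rq_from_pdu(void *pdu)
{
	return pdu - sizeof(struct request);
}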
diff --git a/drivers/block/loop.h b/drivers/block/loop.h
index 0f45416e4fcf..b78de9879f4f 100644
--- a/drivers/block/loop.h
+++ b/drivers/block/loop.h
@@ -66,7 +66,6 @@ struct loop_device {
struct loop_cmd {
struct kthread_work work;
- struct request *rq;
bool use_aio; /* use AIO interface to handle I/O */
atomic_t ref; /* only for aio */
long ret;
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 07dc5419bd63..8e8b04cc569a 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -732,6 +732,7 @@ static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
*/
enum {
Opt_queue_depth,
+ Opt_lock_timeout,
Opt_last_int,
/* int args above */
Opt_last_string,
@@ -740,11 +741,13 @@ enum {
Opt_read_write,
Opt_lock_on_read,
Opt_exclusive,
+ Opt_notrim,
Opt_err
};
static match_table_t rbd_opts_tokens = {
{Opt_queue_depth, "queue_depth=%d"},
+ {Opt_lock_timeout, "lock_timeout=%d"},
/* int args above */
/* string args above */
{Opt_read_only, "read_only"},
@@ -753,20 +756,25 @@ static match_table_t rbd_opts_tokens = {
{Opt_read_write, "rw"}, /* Alternate spelling */
{Opt_lock_on_read, "lock_on_read"},
{Opt_exclusive, "exclusive"},
+ {Opt_notrim, "notrim"},
{Opt_err, NULL}
};
struct rbd_options {
int queue_depth;
+ unsigned long lock_timeout;
bool read_only;
bool lock_on_read;
bool exclusive;
+ bool trim;
};
#define RBD_QUEUE_DEPTH_DEFAULT BLKDEV_MAX_RQ
+#define RBD_LOCK_TIMEOUT_DEFAULT 0 /* no timeout */
#define RBD_READ_ONLY_DEFAULT false
#define RBD_LOCK_ON_READ_DEFAULT false
#define RBD_EXCLUSIVE_DEFAULT false
+#define RBD_TRIM_DEFAULT true
static int parse_rbd_opts_token(char *c, void *private)
{
@@ -796,6 +804,14 @@ static int parse_rbd_opts_token(char *c, void *private)
}
rbd_opts->queue_depth = intval;
break;
+ case Opt_lock_timeout:
+ /* 0 is "wait forever" (i.e. infinite timeout) */
+ if (intval < 0 || intval > INT_MAX / 1000) {
+ pr_err("lock_timeout out of range\n");
+ return -EINVAL;
+ }
+ rbd_opts->lock_timeout = msecs_to_jiffies(intval * 1000);
+ break;
case Opt_read_only:
rbd_opts->read_only = true;
break;
@@ -808,6 +824,9 @@ static int parse_rbd_opts_token(char *c, void *private)
case Opt_exclusive:
rbd_opts->exclusive = true;
break;
+ case Opt_notrim:
+ rbd_opts->trim = false;
+ break;
default:
/* libceph prints "bad option" msg */
return -EINVAL;
@@ -1392,7 +1411,7 @@ static bool rbd_img_is_write(struct rbd_img_request *img_req)
case OBJ_OP_DISCARD:
return true;
default:
- rbd_assert(0);
+ BUG();
}
}
@@ -2466,7 +2485,7 @@ again:
}
return false;
default:
- rbd_assert(0);
+ BUG();
}
}
@@ -2494,7 +2513,7 @@ static bool __rbd_obj_handle_request(struct rbd_obj_request *obj_req)
}
return false;
default:
- rbd_assert(0);
+ BUG();
}
}
@@ -3533,9 +3552,22 @@ static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
/*
* lock_rwsem must be held for read
*/
-static void rbd_wait_state_locked(struct rbd_device *rbd_dev)
+static int rbd_wait_state_locked(struct rbd_device *rbd_dev, bool may_acquire)
{
DEFINE_WAIT(wait);
+ unsigned long timeout;
+ int ret = 0;
+
+ if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags))
+ return -EBLACKLISTED;
+
+ if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED)
+ return 0;
+
+ if (!may_acquire) {
+ rbd_warn(rbd_dev, "exclusive lock required");
+ return -EROFS;
+ }
do {
/*
@@ -3547,12 +3579,22 @@ static void rbd_wait_state_locked(struct rbd_device *rbd_dev)
prepare_to_wait_exclusive(&rbd_dev->lock_waitq, &wait,
TASK_UNINTERRUPTIBLE);
up_read(&rbd_dev->lock_rwsem);
- schedule();
+ timeout = schedule_timeout(ceph_timeout_jiffies(
+ rbd_dev->opts->lock_timeout));
down_read(&rbd_dev->lock_rwsem);
- } while (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED &&
- !test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags));
+ if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) {
+ ret = -EBLACKLISTED;
+ break;
+ }
+ if (!timeout) {
+ rbd_warn(rbd_dev, "timed out waiting for lock");
+ ret = -ETIMEDOUT;
+ break;
+ }
+ } while (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED);
finish_wait(&rbd_dev->lock_waitq, &wait);
+ return ret;
}
static void rbd_queue_workfn(struct work_struct *work)
@@ -3638,19 +3680,10 @@ static void rbd_queue_workfn(struct work_struct *work)
(op_type != OBJ_OP_READ || rbd_dev->opts->lock_on_read);
if (must_be_locked) {
down_read(&rbd_dev->lock_rwsem);
- if (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED &&
- !test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) {
- if (rbd_dev->opts->exclusive) {
- rbd_warn(rbd_dev, "exclusive lock required");
- result = -EROFS;
- goto err_unlock;
- }
- rbd_wait_state_locked(rbd_dev);
- }
- if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) {
- result = -EBLACKLISTED;
+ result = rbd_wait_state_locked(rbd_dev,
+ !rbd_dev->opts->exclusive);
+ if (result)
goto err_unlock;
- }
}
img_request = rbd_img_request_create(rbd_dev, op_type, snapc);
@@ -3902,7 +3935,8 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
{
struct gendisk *disk;
struct request_queue *q;
- u64 segment_size;
+ unsigned int objset_bytes =
+ rbd_dev->layout.object_size * rbd_dev->layout.stripe_count;
int err;
/* create gendisk info */
@@ -3942,20 +3976,19 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
/* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */
- /* set io sizes to object size */
- segment_size = rbd_obj_bytes(&rbd_dev->header);
- blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
+ blk_queue_max_hw_sectors(q, objset_bytes >> SECTOR_SHIFT);
q->limits.max_sectors = queue_max_hw_sectors(q);
blk_queue_max_segments(q, USHRT_MAX);
blk_queue_max_segment_size(q, UINT_MAX);
- blk_queue_io_min(q, segment_size);
- blk_queue_io_opt(q, segment_size);
+ blk_queue_io_min(q, objset_bytes);
+ blk_queue_io_opt(q, objset_bytes);
- /* enable the discard support */
- blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
- q->limits.discard_granularity = segment_size;
- blk_queue_max_discard_sectors(q, segment_size / SECTOR_SIZE);
- blk_queue_max_write_zeroes_sectors(q, segment_size / SECTOR_SIZE);
+ if (rbd_dev->opts->trim) {
+ blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
+ q->limits.discard_granularity = objset_bytes;
+ blk_queue_max_discard_sectors(q, objset_bytes >> SECTOR_SHIFT);
+ blk_queue_max_write_zeroes_sectors(q, objset_bytes >> SECTOR_SHIFT);
+ }
if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
@@ -5179,8 +5212,10 @@ static int rbd_add_parse_args(const char *buf,
rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;
rbd_opts->queue_depth = RBD_QUEUE_DEPTH_DEFAULT;
+ rbd_opts->lock_timeout = RBD_LOCK_TIMEOUT_DEFAULT;
rbd_opts->lock_on_read = RBD_LOCK_ON_READ_DEFAULT;
rbd_opts->exclusive = RBD_EXCLUSIVE_DEFAULT;
+ rbd_opts->trim = RBD_TRIM_DEFAULT;
copts = ceph_parse_options(options, mon_addrs,
mon_addrs + mon_addrs_size - 1,
@@ -5216,6 +5251,8 @@ static void rbd_dev_image_unlock(struct rbd_device *rbd_dev)
static int rbd_add_acquire_lock(struct rbd_device *rbd_dev)
{
+ int ret;
+
if (!(rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK)) {
rbd_warn(rbd_dev, "exclusive-lock feature is not enabled");
return -EINVAL;
@@ -5223,9 +5260,9 @@ static int rbd_add_acquire_lock(struct rbd_device *rbd_dev)
/* FIXME: "rbd map --exclusive" should be interruptible */
down_read(&rbd_dev->lock_rwsem);
- rbd_wait_state_locked(rbd_dev);
+ ret = rbd_wait_state_locked(rbd_dev, true);
up_read(&rbd_dev->lock_rwsem);
- if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) {
+ if (ret) {
rbd_warn(rbd_dev, "failed to acquire exclusive lock");
return -EROFS;
}
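Taken together, the new options would be used at map time along the lines of "rbd map -o lock_timeout=60,notrim mypool/myimage" (image name hypothetical): exclusive-lock waits are then bounded at 60 seconds instead of blocking forever, and the block queue is registered without discard/write-zeroes support.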
diff --git a/drivers/block/swim.c b/drivers/block/swim.c
index 64e066eba72e..0e31884a9519 100644
--- a/drivers/block/swim.c
+++ b/drivers/block/swim.c
@@ -110,7 +110,7 @@ struct iwm {
/* Select values for swim_select and swim_readbit */
#define READ_DATA_0 0x074
-#define TWOMEG_DRIVE 0x075
+#define ONEMEG_DRIVE 0x075
#define SINGLE_SIDED 0x076
#define DRIVE_PRESENT 0x077
#define DISK_IN 0x170
@@ -118,9 +118,9 @@ struct iwm {
#define TRACK_ZERO 0x172
#define TACHO 0x173
#define READ_DATA_1 0x174
-#define MFM_MODE 0x175
+#define GCR_MODE 0x175
#define SEEK_COMPLETE 0x176
-#define ONEMEG_MEDIA 0x177
+#define TWOMEG_MEDIA 0x177
/* Bits in handshake register */
@@ -612,7 +612,6 @@ static void setup_medium(struct floppy_state *fs)
struct floppy_struct *g;
fs->disk_in = 1;
fs->write_protected = swim_readbit(base, WRITE_PROT);
- fs->type = swim_readbit(base, ONEMEG_MEDIA);
if (swim_track00(base))
printk(KERN_ERR
@@ -620,6 +619,9 @@ static void setup_medium(struct floppy_state *fs)
swim_track00(base);
+ fs->type = swim_readbit(base, TWOMEG_MEDIA) ?
+ HD_MEDIA : DD_MEDIA;
+ fs->head_number = swim_readbit(base, SINGLE_SIDED) ? 1 : 2;
get_floppy_geometry(fs, 0, &g);
fs->total_secs = g->size;
fs->secpercyl = g->head * g->sect;
@@ -646,7 +648,7 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
swim_write(base, setup, S_IBM_DRIVE | S_FCLK_DIV2);
udelay(10);
- swim_drive(base, INTERNAL_DRIVE);
+ swim_drive(base, fs->location);
swim_motor(base, ON);
swim_action(base, SETMFM);
if (fs->ejected)
@@ -656,6 +658,8 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
goto out;
}
+ set_capacity(fs->disk, fs->total_secs);
+
if (mode & FMODE_NDELAY)
return 0;
@@ -727,14 +731,9 @@ static int floppy_ioctl(struct block_device *bdev, fmode_t mode,
if (copy_to_user((void __user *) param, (void *) &floppy_type,
sizeof(struct floppy_struct)))
return -EFAULT;
- break;
-
- default:
- printk(KERN_DEBUG "SWIM floppy_ioctl: unknown cmd %d\n",
- cmd);
- return -ENOSYS;
+ return 0;
}
- return 0;
+ return -ENOTTY;
}
static int floppy_getgeo(struct block_device *bdev, struct hd_geometry *geo)
@@ -795,7 +794,7 @@ static struct kobject *floppy_find(dev_t dev, int *part, void *data)
struct swim_priv *swd = data;
int drive = (*part & 3);
- if (drive > swd->floppy_count)
+ if (drive >= swd->floppy_count)
return NULL;
*part = 0;
@@ -813,10 +812,9 @@ static int swim_add_floppy(struct swim_priv *swd, enum drive_location location)
swim_motor(base, OFF);
- if (swim_readbit(base, SINGLE_SIDED))
- fs->head_number = 1;
- else
- fs->head_number = 2;
+ fs->type = HD_MEDIA;
+ fs->head_number = 2;
+
fs->ref_count = 0;
fs->ejected = 1;
@@ -834,10 +832,12 @@ static int swim_floppy_init(struct swim_priv *swd)
/* scan floppy drives */
swim_drive(base, INTERNAL_DRIVE);
- if (swim_readbit(base, DRIVE_PRESENT))
+ if (swim_readbit(base, DRIVE_PRESENT) &&
+ !swim_readbit(base, ONEMEG_DRIVE))
swim_add_floppy(swd, INTERNAL_DRIVE);
swim_drive(base, EXTERNAL_DRIVE);
- if (swim_readbit(base, DRIVE_PRESENT))
+ if (swim_readbit(base, DRIVE_PRESENT) &&
+ !swim_readbit(base, ONEMEG_DRIVE))
swim_add_floppy(swd, EXTERNAL_DRIVE);
/* register floppy drives */
@@ -861,7 +861,6 @@ static int swim_floppy_init(struct swim_priv *swd)
&swd->lock);
if (!swd->unit[drive].disk->queue) {
err = -ENOMEM;
- put_disk(swd->unit[drive].disk);
goto exit_put_disks;
}
blk_queue_bounce_limit(swd->unit[drive].disk->queue,
@@ -911,7 +910,7 @@ static int swim_probe(struct platform_device *dev)
goto out;
}
- swim_base = ioremap(res->start, resource_size(res));
+ swim_base = (struct swim __iomem *)res->start;
if (!swim_base) {
ret = -ENOMEM;
goto out_release_io;
@@ -923,7 +922,7 @@ static int swim_probe(struct platform_device *dev)
if (!get_swim_mode(swim_base)) {
printk(KERN_INFO "SWIM device not found !\n");
ret = -ENODEV;
- goto out_iounmap;
+ goto out_release_io;
}
/* set platform driver data */
@@ -931,7 +930,7 @@ static int swim_probe(struct platform_device *dev)
swd = kzalloc(sizeof(struct swim_priv), GFP_KERNEL);
if (!swd) {
ret = -ENOMEM;
- goto out_iounmap;
+ goto out_release_io;
}
platform_set_drvdata(dev, swd);
@@ -945,8 +944,6 @@ static int swim_probe(struct platform_device *dev)
out_kfree:
kfree(swd);
-out_iounmap:
- iounmap(swim_base);
out_release_io:
release_mem_region(res->start, resource_size(res));
out:
@@ -974,8 +971,6 @@ static int swim_remove(struct platform_device *dev)
for (drive = 0; drive < swd->floppy_count; drive++)
floppy_eject(&swd->unit[drive]);
- iounmap(swd->base);
-
res = platform_get_resource(dev, IORESOURCE_MEM, 0);
if (res)
release_mem_region(res->start, resource_size(res));
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index af51015d056e..469541c1e51e 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -148,7 +148,7 @@ struct swim3 {
#define MOTOR_ON 2
#define RELAX 3 /* also eject in progress */
#define READ_DATA_0 4
-#define TWOMEG_DRIVE 5
+#define ONEMEG_DRIVE 5
#define SINGLE_SIDED 6 /* drive or diskette is 4MB type? */
#define DRIVE_PRESENT 7
#define DISK_IN 8
@@ -156,9 +156,9 @@ struct swim3 {
#define TRACK_ZERO 10
#define TACHO 11
#define READ_DATA_1 12
-#define MFM_MODE 13
+#define GCR_MODE 13
#define SEEK_COMPLETE 14
-#define ONEMEG_MEDIA 15
+#define TWOMEG_MEDIA 15
/* Definitions of values used in writing and formatting */
#define DATA_ESCAPE 0x99
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 8327478effd0..bfc566d3f31a 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -2371,7 +2371,7 @@ static int cdrom_ioctl_media_changed(struct cdrom_device_info *cdi,
if (!CDROM_CAN(CDC_SELECT_DISC) || arg == CDSL_CURRENT)
return media_changed(cdi, 1);
- if ((unsigned int)arg >= cdi->capacity)
+ if (arg >= cdi->capacity)
return -EINVAL;
info = kmalloc(sizeof(*info), GFP_KERNEL);
diff --git a/drivers/char/random.c b/drivers/char/random.c
index e027e7fa1472..cd888d4ee605 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -261,6 +261,7 @@
#include <linux/ptrace.h>
#include <linux/workqueue.h>
#include <linux/irq.h>
+#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/completion.h>
#include <linux/uuid.h>
@@ -427,8 +428,9 @@ struct crng_state primary_crng = {
* its value (from 0->1->2).
*/
static int crng_init = 0;
-#define crng_ready() (likely(crng_init > 0))
+#define crng_ready() (likely(crng_init > 1))
static int crng_init_cnt = 0;
+static unsigned long crng_global_init_time = 0;
#define CRNG_INIT_CNT_THRESH (2*CHACHA20_KEY_SIZE)
static void _extract_crng(struct crng_state *crng,
__u32 out[CHACHA20_BLOCK_WORDS]);
@@ -437,6 +439,16 @@ static void _crng_backtrack_protect(struct crng_state *crng,
static void process_random_ready_list(void);
static void _get_random_bytes(void *buf, int nbytes);
+static struct ratelimit_state unseeded_warning =
+ RATELIMIT_STATE_INIT("warn_unseeded_randomness", HZ, 3);
+static struct ratelimit_state urandom_warning =
+ RATELIMIT_STATE_INIT("warn_urandom_randomness", HZ, 3);
+
+static int ratelimit_disable __read_mostly;
+
+module_param_named(ratelimit_disable, ratelimit_disable, int, 0644);
+MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression");
+
/**********************************************************************
*
* OS independent entropy store. Here are the functions which handle
@@ -787,6 +799,43 @@ static void crng_initialize(struct crng_state *crng)
crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
}
+#ifdef CONFIG_NUMA
+static void do_numa_crng_init(struct work_struct *work)
+{
+ int i;
+ struct crng_state *crng;
+ struct crng_state **pool;
+
+ pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL|__GFP_NOFAIL);
+ for_each_online_node(i) {
+ crng = kmalloc_node(sizeof(struct crng_state),
+ GFP_KERNEL | __GFP_NOFAIL, i);
+ spin_lock_init(&crng->lock);
+ crng_initialize(crng);
+ pool[i] = crng;
+ }
+ mb();
+ if (cmpxchg(&crng_node_pool, NULL, pool)) {
+ for_each_node(i)
+ kfree(pool[i]);
+ kfree(pool);
+ }
+}
+
+static DECLARE_WORK(numa_crng_init_work, do_numa_crng_init);
+
+static void numa_crng_init(void)
+{
+ schedule_work(&numa_crng_init_work);
+}
+#else
+static void numa_crng_init(void) {}
+#endif
+
+/*
+ * crng_fast_load() can be called by code in the interrupt service
+ * path. So we can't afford to dilly-dally.
+ */
static int crng_fast_load(const char *cp, size_t len)
{
unsigned long flags;
@@ -794,7 +843,7 @@ static int crng_fast_load(const char *cp, size_t len)
if (!spin_trylock_irqsave(&primary_crng.lock, flags))
return 0;
- if (crng_ready()) {
+ if (crng_init != 0) {
spin_unlock_irqrestore(&primary_crng.lock, flags);
return 0;
}
@@ -813,6 +862,51 @@ static int crng_fast_load(const char *cp, size_t len)
return 1;
}
+/*
+ * crng_slow_load() is called by add_device_randomness, which has two
+ * attributes. (1) We can't trust that the buffer passed to it is
+ * guaranteed to be unpredictable (so it might not have any entropy at
+ * all), and (2) it doesn't have the performance constraints of
+ * crng_fast_load().
+ *
+ * So we do something more comprehensive which is guaranteed to touch
+ * all of the primary_crng's state, and which uses a LFSR with a
+ * period of 255 as part of the mixing algorithm. Finally, we do
+ * *not* advance crng_init_cnt since the buffer we get may be something
+ * like a fixed DMI table (for example), which might very well be
+ * unique to the machine, but is otherwise unvarying.
+ */
+static int crng_slow_load(const char *cp, size_t len)
+{
+ unsigned long flags;
+ static unsigned char lfsr = 1;
+ unsigned char tmp;
+ unsigned i, max = CHACHA20_KEY_SIZE;
+ const char * src_buf = cp;
+ char * dest_buf = (char *) &primary_crng.state[4];
+
+ if (!spin_trylock_irqsave(&primary_crng.lock, flags))
+ return 0;
+ if (crng_init != 0) {
+ spin_unlock_irqrestore(&primary_crng.lock, flags);
+ return 0;
+ }
+ if (len > max)
+ max = len;
+
+ for (i = 0; i < max ; i++) {
+ tmp = lfsr;
+ lfsr >>= 1;
+ if (tmp & 1)
+ lfsr ^= 0xE1;
+ tmp = dest_buf[i % CHACHA20_KEY_SIZE];
+ dest_buf[i % CHACHA20_KEY_SIZE] ^= src_buf[i % len] ^ lfsr;
+ lfsr += (tmp << 3) | (tmp >> 5);
+ }
+ spin_unlock_irqrestore(&primary_crng.lock, flags);
+ return 1;
+}
+
static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
{
unsigned long flags;
@@ -831,7 +925,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
_crng_backtrack_protect(&primary_crng, buf.block,
CHACHA20_KEY_SIZE);
}
- spin_lock_irqsave(&primary_crng.lock, flags);
+ spin_lock_irqsave(&crng->lock, flags);
for (i = 0; i < 8; i++) {
unsigned long rv;
if (!arch_get_random_seed_long(&rv) &&
@@ -841,13 +935,26 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
}
memzero_explicit(&buf, sizeof(buf));
crng->init_time = jiffies;
- spin_unlock_irqrestore(&primary_crng.lock, flags);
+ spin_unlock_irqrestore(&crng->lock, flags);
if (crng == &primary_crng && crng_init < 2) {
invalidate_batched_entropy();
+ numa_crng_init();
crng_init = 2;
process_random_ready_list();
wake_up_interruptible(&crng_init_wait);
pr_notice("random: crng init done\n");
+ if (unseeded_warning.missed) {
+ pr_notice("random: %d get_random_xx warning(s) missed "
+ "due to ratelimiting\n",
+ unseeded_warning.missed);
+ unseeded_warning.missed = 0;
+ }
+ if (urandom_warning.missed) {
+ pr_notice("random: %d urandom warning(s) missed "
+ "due to ratelimiting\n",
+ urandom_warning.missed);
+ urandom_warning.missed = 0;
+ }
}
}
@@ -856,8 +963,9 @@ static void _extract_crng(struct crng_state *crng,
{
unsigned long v, flags;
- if (crng_init > 1 &&
- time_after(jiffies, crng->init_time + CRNG_RESEED_INTERVAL))
+ if (crng_ready() &&
+ (time_after(crng_global_init_time, crng->init_time) ||
+ time_after(jiffies, crng->init_time + CRNG_RESEED_INTERVAL)))
crng_reseed(crng, crng == &primary_crng ? &input_pool : NULL);
spin_lock_irqsave(&crng->lock, flags);
if (arch_get_random_long(&v))
@@ -981,10 +1089,8 @@ void add_device_randomness(const void *buf, unsigned int size)
unsigned long time = random_get_entropy() ^ jiffies;
unsigned long flags;
- if (!crng_ready()) {
- crng_fast_load(buf, size);
- return;
- }
+ if (!crng_ready() && size)
+ crng_slow_load(buf, size);
trace_add_device_randomness(size, _RET_IP_);
spin_lock_irqsave(&input_pool.lock, flags);
@@ -1139,7 +1245,7 @@ void add_interrupt_randomness(int irq, int irq_flags)
fast_mix(fast_pool);
add_interrupt_bench(cycles);
- if (!crng_ready()) {
+ if (unlikely(crng_init == 0)) {
if ((fast_pool->count >= 64) &&
crng_fast_load((char *) fast_pool->pool,
sizeof(fast_pool->pool))) {
@@ -1489,8 +1595,9 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller,
#ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM
print_once = true;
#endif
- pr_notice("random: %s called from %pS with crng_init=%d\n",
- func_name, caller, crng_init);
+ if (__ratelimit(&unseeded_warning))
+ pr_notice("random: %s called from %pS with crng_init=%d\n",
+ func_name, caller, crng_init);
}
/*
@@ -1680,28 +1787,14 @@ static void init_std_data(struct entropy_store *r)
*/
static int rand_initialize(void)
{
-#ifdef CONFIG_NUMA
- int i;
- struct crng_state *crng;
- struct crng_state **pool;
-#endif
-
init_std_data(&input_pool);
init_std_data(&blocking_pool);
crng_initialize(&primary_crng);
-
-#ifdef CONFIG_NUMA
- pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL|__GFP_NOFAIL);
- for_each_online_node(i) {
- crng = kmalloc_node(sizeof(struct crng_state),
- GFP_KERNEL | __GFP_NOFAIL, i);
- spin_lock_init(&crng->lock);
- crng_initialize(crng);
- pool[i] = crng;
+ crng_global_init_time = jiffies;
+ if (ratelimit_disable) {
+ urandom_warning.interval = 0;
+ unseeded_warning.interval = 0;
}
- mb();
- crng_node_pool = pool;
-#endif
return 0;
}
early_initcall(rand_initialize);
@@ -1769,9 +1862,10 @@ urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
if (!crng_ready() && maxwarn > 0) {
maxwarn--;
- printk(KERN_NOTICE "random: %s: uninitialized urandom read "
- "(%zd bytes read)\n",
- current->comm, nbytes);
+ if (__ratelimit(&urandom_warning))
+ printk(KERN_NOTICE "random: %s: uninitialized "
+ "urandom read (%zd bytes read)\n",
+ current->comm, nbytes);
spin_lock_irqsave(&primary_crng.lock, flags);
crng_init_cnt = 0;
spin_unlock_irqrestore(&primary_crng.lock, flags);
@@ -1875,6 +1969,14 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
input_pool.entropy_count = 0;
blocking_pool.entropy_count = 0;
return 0;
+ case RNDRESEEDCRNG:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ if (crng_init < 2)
+ return -ENODATA;
+ crng_reseed(&primary_crng, NULL);
+ crng_global_init_time = jiffies - 1;
+ return 0;
default:
return -EINVAL;
}
@@ -2212,7 +2314,7 @@ void add_hwgenerator_randomness(const char *buffer, size_t count,
{
struct entropy_store *poolp = &input_pool;
- if (!crng_ready()) {
+ if (unlikely(crng_init == 0)) {
crng_fast_load(buffer, count);
return;
}
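The crng_slow_load() comment above relies on its mixing LFSR stepping through 255 states. As a hedged standalone check (plain userspace C, isolating just the shift-and-XOR step and not the data-dependent "lfsr +=" mixing), the cycle length can be measured directly:

#include <stdio.h>

/* Measure the cycle length of the 8-bit LFSR step used by
 * crng_slow_load(): right shift, conditional XOR with 0xE1.
 * Per the kernel comment, this should print 255. */
int main(void)
{
	unsigned char lfsr = 1, tmp;
	unsigned int period = 0;

	do {
		tmp = lfsr;
		lfsr >>= 1;
		if (tmp & 1)
			lfsr ^= 0xE1;
		period++;
	} while (lfsr != 1);

	printf("lfsr period: %u\n", period);
	return 0;
}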
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 9ee2888275c1..8e8a09755d10 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -133,6 +133,14 @@ config VT8500_TIMER
help
Enables support for the VT8500 driver.
+config NPCM7XX_TIMER
+ bool "NPCM7xx timer driver" if COMPILE_TEST
+ depends on HAS_IOMEM
+ select CLKSRC_MMIO
+ help
Enable 24-bit TIMER0 and TIMER1 counters in the NPCM7xx architecture,
where TIMER0 serves as the clockevent and TIMER1 as the clocksource.
+
config CADENCE_TTC_TIMER
bool "Cadence TTC timer driver" if COMPILE_TEST
depends on COMMON_CLK
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index e8e76dfef00b..00caf37e52f9 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -56,6 +56,7 @@ obj-$(CONFIG_CLKSRC_NPS) += timer-nps.o
obj-$(CONFIG_OXNAS_RPS_TIMER) += timer-oxnas-rps.o
obj-$(CONFIG_OWL_TIMER) += owl-timer.o
obj-$(CONFIG_SPRD_TIMER) += timer-sprd.o
+obj-$(CONFIG_NPCM7XX_TIMER) += timer-npcm7xx.o
obj-$(CONFIG_ARC_TIMERS) += arc_timer.o
obj-$(CONFIG_ARM_ARCH_TIMER) += arm_arch_timer.o
diff --git a/drivers/clocksource/timer-imx-tpm.c b/drivers/clocksource/timer-imx-tpm.c
index 21bffdcb2f20..6c8318470b48 100644
--- a/drivers/clocksource/timer-imx-tpm.c
+++ b/drivers/clocksource/timer-imx-tpm.c
@@ -17,9 +17,14 @@
#include <linux/of_irq.h>
#include <linux/sched_clock.h>
+#define TPM_PARAM 0x4
+#define TPM_PARAM_WIDTH_SHIFT 16
+#define TPM_PARAM_WIDTH_MASK (0xff << 16)
#define TPM_SC 0x10
#define TPM_SC_CMOD_INC_PER_CNT (0x1 << 3)
#define TPM_SC_CMOD_DIV_DEFAULT 0x3
+#define TPM_SC_CMOD_DIV_MAX 0x7
+#define TPM_SC_TOF_MASK (0x1 << 7)
#define TPM_CNT 0x14
#define TPM_MOD 0x18
#define TPM_STATUS 0x1c
@@ -29,8 +34,11 @@
#define TPM_C0SC_MODE_SHIFT 2
#define TPM_C0SC_MODE_MASK 0x3c
#define TPM_C0SC_MODE_SW_COMPARE 0x4
+#define TPM_C0SC_CHF_MASK (0x1 << 7)
#define TPM_C0V 0x24
+static int counter_width;
+static int rating;
static void __iomem *timer_base;
static struct clock_event_device clockevent_tpm;
@@ -83,10 +91,11 @@ static int __init tpm_clocksource_init(unsigned long rate)
tpm_delay_timer.freq = rate;
register_current_timer_delay(&tpm_delay_timer);
- sched_clock_register(tpm_read_sched_clock, 32, rate);
+ sched_clock_register(tpm_read_sched_clock, counter_width, rate);
return clocksource_mmio_init(timer_base + TPM_CNT, "imx-tpm",
- rate, 200, 32, clocksource_mmio_readl_up);
+ rate, rating, counter_width,
+ clocksource_mmio_readl_up);
}
static int tpm_set_next_event(unsigned long delta,
@@ -105,7 +114,7 @@ static int tpm_set_next_event(unsigned long delta,
* of writing CNT registers which may cause the min_delta event got
* missed, so we need add a ETIME check here in case it happened.
*/
- return (int)((next - now) <= 0) ? -ETIME : 0;
+ return (int)(next - now) <= 0 ? -ETIME : 0;
}
static int tpm_set_state_oneshot(struct clock_event_device *evt)
@@ -139,7 +148,6 @@ static struct clock_event_device clockevent_tpm = {
.set_state_oneshot = tpm_set_state_oneshot,
.set_next_event = tpm_set_next_event,
.set_state_shutdown = tpm_set_state_shutdown,
- .rating = 200,
};
static int __init tpm_clockevent_init(unsigned long rate, int irq)
@@ -149,10 +157,11 @@ static int __init tpm_clockevent_init(unsigned long rate, int irq)
ret = request_irq(irq, tpm_timer_interrupt, IRQF_TIMER | IRQF_IRQPOLL,
"i.MX7ULP TPM Timer", &clockevent_tpm);
+ clockevent_tpm.rating = rating;
clockevent_tpm.cpumask = cpumask_of(0);
clockevent_tpm.irq = irq;
- clockevents_config_and_register(&clockevent_tpm,
- rate, 300, 0xfffffffe);
+ clockevents_config_and_register(&clockevent_tpm, rate, 300,
+ GENMASK(counter_width - 1, 1));
return ret;
}
@@ -179,7 +188,7 @@ static int __init tpm_timer_init(struct device_node *np)
ipg = of_clk_get_by_name(np, "ipg");
per = of_clk_get_by_name(np, "per");
if (IS_ERR(ipg) || IS_ERR(per)) {
- pr_err("tpm: failed to get igp or per clk\n");
+ pr_err("tpm: failed to get ipg or per clk\n");
ret = -ENODEV;
goto err_clk_get;
}
@@ -197,6 +206,11 @@ static int __init tpm_timer_init(struct device_node *np)
goto err_per_clk_enable;
}
+ counter_width = (readl(timer_base + TPM_PARAM) & TPM_PARAM_WIDTH_MASK)
+ >> TPM_PARAM_WIDTH_SHIFT;
+ /* use rating 200 for 32-bit counter and 150 for 16-bit counter */
+ rating = counter_width == 0x20 ? 200 : 150;
+
/*
* Initialize tpm module to a known state
* 1) Counter disabled
@@ -205,16 +219,25 @@ static int __init tpm_timer_init(struct device_node *np)
* 4) Channel0 disabled
* 5) DMA transfers disabled
*/
+ /* make sure counter is disabled */
writel(0, timer_base + TPM_SC);
+ /* TOF is W1C */
+ writel(TPM_SC_TOF_MASK, timer_base + TPM_SC);
writel(0, timer_base + TPM_CNT);
- writel(0, timer_base + TPM_C0SC);
+ /* CHF is W1C */
+ writel(TPM_C0SC_CHF_MASK, timer_base + TPM_C0SC);
- /* increase per cnt, div 8 by default */
- writel(TPM_SC_CMOD_INC_PER_CNT | TPM_SC_CMOD_DIV_DEFAULT,
+ /*
+ * increase per cnt,
+ * div 8 for 32-bit counter and div 128 for 16-bit counter
+ */
+ writel(TPM_SC_CMOD_INC_PER_CNT |
+ (counter_width == 0x20 ?
+ TPM_SC_CMOD_DIV_DEFAULT : TPM_SC_CMOD_DIV_MAX),
timer_base + TPM_SC);
/* set MOD register to maximum for free running mode */
- writel(0xffffffff, timer_base + TPM_MOD);
+ writel(GENMASK(counter_width - 1, 0), timer_base + TPM_MOD);
rate = clk_get_rate(per) >> 3;
ret = tpm_clocksource_init(rate);
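The switch from hard-coded 0xffffffff/0xfffffffe to GENMASK() is what lets the same code drive both 16-bit and 32-bit TPM counters. For reference, GENMASK(h, l) sets bits h down to l; a sketch of the classic kernel definition (include/linux/bits.h, without the build-time sanity checks):

#define GENMASK(h, l) \
	(((~0UL) - (1UL << (l)) + 1) & (~0UL >> (BITS_PER_LONG - 1 - (h))))

/* For the two counter widths handled here:
 *   GENMASK(31, 0) == 0xffffffff   (TPM_MOD for a 32-bit counter)
 *   GENMASK(15, 1) == 0x0000fffe   (max clockevent delta, 16-bit counter)
 */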
diff --git a/drivers/clocksource/timer-npcm7xx.c b/drivers/clocksource/timer-npcm7xx.c
new file mode 100644
index 000000000000..7a9bb5532d99
--- /dev/null
+++ b/drivers/clocksource/timer-npcm7xx.c
@@ -0,0 +1,215 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2014-2018 Nuvoton Technologies tomer.maimon@nuvoton.com
+ * All rights reserved.
+ *
+ * Copyright 2017 Google, Inc.
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/clockchips.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include "timer-of.h"
+
+/* Timers registers */
+#define NPCM7XX_REG_TCSR0 0x0 /* Timer 0 Control and Status Register */
+#define NPCM7XX_REG_TICR0 0x8 /* Timer 0 Initial Count Register */
+#define NPCM7XX_REG_TCSR1 0x4 /* Timer 1 Control and Status Register */
+#define NPCM7XX_REG_TICR1 0xc /* Timer 1 Initial Count Register */
+#define NPCM7XX_REG_TDR1 0x14 /* Timer 1 Data Register */
+#define NPCM7XX_REG_TISR 0x18 /* Timer Interrupt Status Register */
+
+/* Timers control */
+#define NPCM7XX_Tx_RESETINT 0x1f
+#define NPCM7XX_Tx_PERIOD BIT(27)
+#define NPCM7XX_Tx_INTEN BIT(29)
+#define NPCM7XX_Tx_COUNTEN BIT(30)
+#define NPCM7XX_Tx_ONESHOT 0x0
+#define NPCM7XX_Tx_OPER GENMASK(3, 27)
+#define NPCM7XX_Tx_MIN_PRESCALE 0x1
+#define NPCM7XX_Tx_TDR_MASK_BITS 24
+#define NPCM7XX_Tx_MAX_CNT 0xFFFFFF
+#define NPCM7XX_T0_CLR_INT 0x1
+#define NPCM7XX_Tx_CLR_CSR 0x0
+
+/* Timers operating mode */
+#define NPCM7XX_START_PERIODIC_Tx (NPCM7XX_Tx_PERIOD | NPCM7XX_Tx_COUNTEN | \
+ NPCM7XX_Tx_INTEN | \
+ NPCM7XX_Tx_MIN_PRESCALE)
+
+#define NPCM7XX_START_ONESHOT_Tx (NPCM7XX_Tx_ONESHOT | NPCM7XX_Tx_COUNTEN | \
+ NPCM7XX_Tx_INTEN | \
+ NPCM7XX_Tx_MIN_PRESCALE)
+
+#define NPCM7XX_START_Tx (NPCM7XX_Tx_COUNTEN | NPCM7XX_Tx_PERIOD | \
+ NPCM7XX_Tx_MIN_PRESCALE)
+
+#define NPCM7XX_DEFAULT_CSR (NPCM7XX_Tx_CLR_CSR | NPCM7XX_Tx_MIN_PRESCALE)
+
+static int npcm7xx_timer_resume(struct clock_event_device *evt)
+{
+ struct timer_of *to = to_timer_of(evt);
+ u32 val;
+
+ val = readl(timer_of_base(to) + NPCM7XX_REG_TCSR0);
+ val |= NPCM7XX_Tx_COUNTEN;
+ writel(val, timer_of_base(to) + NPCM7XX_REG_TCSR0);
+
+ return 0;
+}
+
+static int npcm7xx_timer_shutdown(struct clock_event_device *evt)
+{
+ struct timer_of *to = to_timer_of(evt);
+ u32 val;
+
+ val = readl(timer_of_base(to) + NPCM7XX_REG_TCSR0);
+ val &= ~NPCM7XX_Tx_COUNTEN;
+ writel(val, timer_of_base(to) + NPCM7XX_REG_TCSR0);
+
+ return 0;
+}
+
+static int npcm7xx_timer_oneshot(struct clock_event_device *evt)
+{
+ struct timer_of *to = to_timer_of(evt);
+ u32 val;
+
+ val = readl(timer_of_base(to) + NPCM7XX_REG_TCSR0);
+ val &= ~NPCM7XX_Tx_OPER;
+
+ val = readl(timer_of_base(to) + NPCM7XX_REG_TCSR0);
+ val |= NPCM7XX_START_ONESHOT_Tx;
+ writel(val, timer_of_base(to) + NPCM7XX_REG_TCSR0);
+
+ return 0;
+}
+
+static int npcm7xx_timer_periodic(struct clock_event_device *evt)
+{
+ struct timer_of *to = to_timer_of(evt);
+ u32 val;
+
+ val = readl(timer_of_base(to) + NPCM7XX_REG_TCSR0);
+ val &= ~NPCM7XX_Tx_OPER;
+
+ writel(timer_of_period(to), timer_of_base(to) + NPCM7XX_REG_TICR0);
+ val |= NPCM7XX_START_PERIODIC_Tx;
+
+ writel(val, timer_of_base(to) + NPCM7XX_REG_TCSR0);
+
+ return 0;
+}
+
+static int npcm7xx_clockevent_set_next_event(unsigned long evt,
+ struct clock_event_device *clk)
+{
+ struct timer_of *to = to_timer_of(clk);
+ u32 val;
+
+ writel(evt, timer_of_base(to) + NPCM7XX_REG_TICR0);
+ val = readl(timer_of_base(to) + NPCM7XX_REG_TCSR0);
+ val |= NPCM7XX_START_Tx;
+ writel(val, timer_of_base(to) + NPCM7XX_REG_TCSR0);
+
+ return 0;
+}
+
+static irqreturn_t npcm7xx_timer0_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *evt = (struct clock_event_device *)dev_id;
+ struct timer_of *to = to_timer_of(evt);
+
+ writel(NPCM7XX_T0_CLR_INT, timer_of_base(to) + NPCM7XX_REG_TISR);
+
+ evt->event_handler(evt);
+
+ return IRQ_HANDLED;
+}
+
+static struct timer_of npcm7xx_to = {
+ .flags = TIMER_OF_IRQ | TIMER_OF_BASE | TIMER_OF_CLOCK,
+
+ .clkevt = {
+ .name = "npcm7xx-timer0",
+ .features = CLOCK_EVT_FEAT_PERIODIC |
+ CLOCK_EVT_FEAT_ONESHOT,
+ .set_next_event = npcm7xx_clockevent_set_next_event,
+ .set_state_shutdown = npcm7xx_timer_shutdown,
+ .set_state_periodic = npcm7xx_timer_periodic,
+ .set_state_oneshot = npcm7xx_timer_oneshot,
+ .tick_resume = npcm7xx_timer_resume,
+ .rating = 300,
+ },
+
+ .of_irq = {
+ .handler = npcm7xx_timer0_interrupt,
+ .flags = IRQF_TIMER | IRQF_IRQPOLL,
+ },
+};
+
+static void __init npcm7xx_clockevents_init(void)
+{
+ writel(NPCM7XX_DEFAULT_CSR,
+ timer_of_base(&npcm7xx_to) + NPCM7XX_REG_TCSR0);
+
+ writel(NPCM7XX_Tx_RESETINT,
+ timer_of_base(&npcm7xx_to) + NPCM7XX_REG_TISR);
+
+ npcm7xx_to.clkevt.cpumask = cpumask_of(0);
+ clockevents_config_and_register(&npcm7xx_to.clkevt,
+ timer_of_rate(&npcm7xx_to),
+ 0x1, NPCM7XX_Tx_MAX_CNT);
+}
+
+static void __init npcm7xx_clocksource_init(void)
+{
+ u32 val;
+
+ writel(NPCM7XX_DEFAULT_CSR,
+ timer_of_base(&npcm7xx_to) + NPCM7XX_REG_TCSR1);
+ writel(NPCM7XX_Tx_MAX_CNT,
+ timer_of_base(&npcm7xx_to) + NPCM7XX_REG_TICR1);
+
+ val = readl(timer_of_base(&npcm7xx_to) + NPCM7XX_REG_TCSR1);
+ val |= NPCM7XX_START_Tx;
+ writel(val, timer_of_base(&npcm7xx_to) + NPCM7XX_REG_TCSR1);
+
+ clocksource_mmio_init(timer_of_base(&npcm7xx_to) +
+ NPCM7XX_REG_TDR1,
+ "npcm7xx-timer1", timer_of_rate(&npcm7xx_to),
+ 200, (unsigned int)NPCM7XX_Tx_TDR_MASK_BITS,
+ clocksource_mmio_readl_down);
+}
+
+static int __init npcm7xx_timer_init(struct device_node *np)
+{
+ int ret;
+
+ ret = timer_of_init(np, &npcm7xx_to);
+ if (ret)
+ return ret;
+
+ /* Clock input is divided by PRESCALE + 1 before it is fed */
+ /* to the counter */
+ npcm7xx_to.of_clk.rate = npcm7xx_to.of_clk.rate /
+ (NPCM7XX_Tx_MIN_PRESCALE + 1);
+
+ npcm7xx_clocksource_init();
+ npcm7xx_clockevents_init();
+
+ pr_info("Enabling NPCM7xx clocksource timer base: %px, IRQ: %d ",
+ timer_of_base(&npcm7xx_to), timer_of_irq(&npcm7xx_to));
+
+ return 0;
+}
+
+TIMER_OF_DECLARE(npcm7xx, "nuvoton,npcm750-timer", npcm7xx_timer_init);
+
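A quick worked example of the prescaler adjustment in npcm7xx_timer_init(), assuming a hypothetical 25 MHz timer input clock:

/* rate seen by the counters = input rate / (PRESCALE + 1) */
rate = 25000000 / (NPCM7XX_Tx_MIN_PRESCALE + 1);	/* = 12.5 MHz */
/* one full 24-bit wrap (NPCM7XX_Tx_MAX_CNT == 0xFFFFFF) then takes
 * 0xFFFFFF / 12500000 ≈ 1.34 s, the longest programmable event */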
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 7f56fe5183f2..de55c7d57438 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -71,16 +71,6 @@ config ARM_BRCMSTB_AVS_CPUFREQ
Say Y, if you have a Broadcom SoC with AVS support for DFS or DVFS.
-config ARM_BRCMSTB_AVS_CPUFREQ_DEBUG
- bool "Broadcom STB AVS CPUfreq driver sysfs debug capability"
- depends on ARM_BRCMSTB_AVS_CPUFREQ
- help
- Enabling this option turns on debug support via sysfs under
- /sys/kernel/debug/brcmstb-avs-cpufreq. It is possible to read all and
- write some AVS mailbox registers through sysfs entries.
-
- If in doubt, say N.
-
config ARM_EXYNOS5440_CPUFREQ
tristate "SAMSUNG EXYNOS5440"
depends on SOC_EXYNOS5440
diff --git a/drivers/cpufreq/brcmstb-avs-cpufreq.c b/drivers/cpufreq/brcmstb-avs-cpufreq.c
index 6cdac1aaf23c..b07559b9ed99 100644
--- a/drivers/cpufreq/brcmstb-avs-cpufreq.c
+++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c
@@ -49,13 +49,6 @@
#include <linux/platform_device.h>
#include <linux/semaphore.h>
-#ifdef CONFIG_ARM_BRCMSTB_AVS_CPUFREQ_DEBUG
-#include <linux/ctype.h>
-#include <linux/debugfs.h>
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-#endif
-
/* Max number of arguments AVS calls take */
#define AVS_MAX_CMD_ARGS 4
/*
@@ -182,88 +175,11 @@ struct private_data {
void __iomem *base;
void __iomem *avs_intr_base;
struct device *dev;
-#ifdef CONFIG_ARM_BRCMSTB_AVS_CPUFREQ_DEBUG
- struct dentry *debugfs;
-#endif
struct completion done;
struct semaphore sem;
struct pmap pmap;
};
-#ifdef CONFIG_ARM_BRCMSTB_AVS_CPUFREQ_DEBUG
-
-enum debugfs_format {
- DEBUGFS_NORMAL,
- DEBUGFS_FLOAT,
- DEBUGFS_REV,
-};
-
-struct debugfs_data {
- struct debugfs_entry *entry;
- struct private_data *priv;
-};
-
-struct debugfs_entry {
- char *name;
- u32 offset;
- fmode_t mode;
- enum debugfs_format format;
-};
-
-#define DEBUGFS_ENTRY(name, mode, format) { \
- #name, AVS_MBOX_##name, mode, format \
-}
-
-/*
- * These are used for debugfs only. Otherwise we use AVS_MBOX_PARAM() directly.
- */
-#define AVS_MBOX_PARAM1 AVS_MBOX_PARAM(0)
-#define AVS_MBOX_PARAM2 AVS_MBOX_PARAM(1)
-#define AVS_MBOX_PARAM3 AVS_MBOX_PARAM(2)
-#define AVS_MBOX_PARAM4 AVS_MBOX_PARAM(3)
-
-/*
- * This table stores the name, access permissions and offset for each hardware
- * register and is used to generate debugfs entries.
- */
-static struct debugfs_entry debugfs_entries[] = {
- DEBUGFS_ENTRY(COMMAND, S_IWUSR, DEBUGFS_NORMAL),
- DEBUGFS_ENTRY(STATUS, S_IWUSR, DEBUGFS_NORMAL),
- DEBUGFS_ENTRY(VOLTAGE0, 0, DEBUGFS_FLOAT),
- DEBUGFS_ENTRY(TEMP0, 0, DEBUGFS_FLOAT),
- DEBUGFS_ENTRY(PV0, 0, DEBUGFS_FLOAT),
- DEBUGFS_ENTRY(MV0, 0, DEBUGFS_FLOAT),
- DEBUGFS_ENTRY(PARAM1, S_IWUSR, DEBUGFS_NORMAL),
- DEBUGFS_ENTRY(PARAM2, S_IWUSR, DEBUGFS_NORMAL),
- DEBUGFS_ENTRY(PARAM3, S_IWUSR, DEBUGFS_NORMAL),
- DEBUGFS_ENTRY(PARAM4, S_IWUSR, DEBUGFS_NORMAL),
- DEBUGFS_ENTRY(REVISION, 0, DEBUGFS_REV),
- DEBUGFS_ENTRY(PSTATE, 0, DEBUGFS_NORMAL),
- DEBUGFS_ENTRY(HEARTBEAT, 0, DEBUGFS_NORMAL),
- DEBUGFS_ENTRY(MAGIC, S_IWUSR, DEBUGFS_NORMAL),
- DEBUGFS_ENTRY(SIGMA_HVT, 0, DEBUGFS_NORMAL),
- DEBUGFS_ENTRY(SIGMA_SVT, 0, DEBUGFS_NORMAL),
- DEBUGFS_ENTRY(VOLTAGE1, 0, DEBUGFS_FLOAT),
- DEBUGFS_ENTRY(TEMP1, 0, DEBUGFS_FLOAT),
- DEBUGFS_ENTRY(PV1, 0, DEBUGFS_FLOAT),
- DEBUGFS_ENTRY(MV1, 0, DEBUGFS_FLOAT),
- DEBUGFS_ENTRY(FREQUENCY, 0, DEBUGFS_NORMAL),
-};
-
-static int brcm_avs_target_index(struct cpufreq_policy *, unsigned int);
-
-static char *__strtolower(char *s)
-{
- char *p;
-
- for (p = s; *p; p++)
- *p = tolower(*p);
-
- return s;
-}
-
-#endif /* CONFIG_ARM_BRCMSTB_AVS_CPUFREQ_DEBUG */
-
static void __iomem *__map_region(const char *name)
{
struct device_node *np;
@@ -516,238 +432,6 @@ brcm_avs_get_freq_table(struct device *dev, struct private_data *priv)
return table;
}
-#ifdef CONFIG_ARM_BRCMSTB_AVS_CPUFREQ_DEBUG
-
-#define MANT(x) (unsigned int)(abs((x)) / 1000)
-#define FRAC(x) (unsigned int)(abs((x)) - abs((x)) / 1000 * 1000)
-
-static int brcm_avs_debug_show(struct seq_file *s, void *data)
-{
- struct debugfs_data *dbgfs = s->private;
- void __iomem *base;
- u32 val, offset;
-
- if (!dbgfs) {
- seq_puts(s, "No device pointer\n");
- return 0;
- }
-
- base = dbgfs->priv->base;
- offset = dbgfs->entry->offset;
- val = readl(base + offset);
- switch (dbgfs->entry->format) {
- case DEBUGFS_NORMAL:
- seq_printf(s, "%u\n", val);
- break;
- case DEBUGFS_FLOAT:
- seq_printf(s, "%d.%03d\n", MANT(val), FRAC(val));
- break;
- case DEBUGFS_REV:
- seq_printf(s, "%c.%c.%c.%c\n", (val >> 24 & 0xff),
- (val >> 16 & 0xff), (val >> 8 & 0xff),
- val & 0xff);
- break;
- }
- seq_printf(s, "0x%08x\n", val);
-
- return 0;
-}
-
-#undef MANT
-#undef FRAC
-
-static ssize_t brcm_avs_seq_write(struct file *file, const char __user *buf,
- size_t size, loff_t *ppos)
-{
- struct seq_file *s = file->private_data;
- struct debugfs_data *dbgfs = s->private;
- struct private_data *priv = dbgfs->priv;
- void __iomem *base, *avs_intr_base;
- bool use_issue_command = false;
- unsigned long val, offset;
- char str[128];
- int ret;
- char *str_ptr = str;
-
- if (size >= sizeof(str))
- return -E2BIG;
-
- memset(str, 0, sizeof(str));
- ret = copy_from_user(str, buf, size);
- if (ret)
- return ret;
-
- base = priv->base;
- avs_intr_base = priv->avs_intr_base;
- offset = dbgfs->entry->offset;
- /*
- * Special case writing to "command" entry only: if the string starts
- * with a 'c', we use the driver's __issue_avs_command() function.
- * Otherwise, we perform a raw write. This should allow testing of raw
- * access as well as using the higher level function. (Raw access
- * doesn't clear the firmware return status after issuing the command.)
- */
- if (str_ptr[0] == 'c' && offset == AVS_MBOX_COMMAND) {
- use_issue_command = true;
- str_ptr++;
- }
- if (kstrtoul(str_ptr, 0, &val) != 0)
- return -EINVAL;
-
- /*
- * Setting the P-state is a special case. We need to update the CPU
- * frequency we report.
- */
- if (val == AVS_CMD_SET_PSTATE) {
- struct cpufreq_policy *policy;
- unsigned int pstate;
-
- policy = cpufreq_cpu_get(smp_processor_id());
- /* Read back the P-state we are about to set */
- pstate = readl(base + AVS_MBOX_PARAM(0));
- if (use_issue_command) {
- ret = brcm_avs_target_index(policy, pstate);
- return ret ? ret : size;
- }
- policy->cur = policy->freq_table[pstate].frequency;
- }
-
- if (use_issue_command) {
- ret = __issue_avs_command(priv, val, false, NULL);
- } else {
- /* Locking here is not perfect, but is only for debug. */
- ret = down_interruptible(&priv->sem);
- if (ret)
- return ret;
-
- writel(val, base + offset);
- /* We have to wake up the firmware to process a command. */
- if (offset == AVS_MBOX_COMMAND)
- writel(AVS_CPU_L2_INT_MASK,
- avs_intr_base + AVS_CPU_L2_SET0);
- up(&priv->sem);
- }
-
- return ret ? ret : size;
-}
-
-static struct debugfs_entry *__find_debugfs_entry(const char *name)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(debugfs_entries); i++)
- if (strcasecmp(debugfs_entries[i].name, name) == 0)
- return &debugfs_entries[i];
-
- return NULL;
-}
-
-static int brcm_avs_debug_open(struct inode *inode, struct file *file)
-{
- struct debugfs_data *data;
- fmode_t fmode;
- int ret;
-
- /*
- * seq_open(), which is called by single_open(), clears "write" access.
- * We need write access to some files, so we preserve our access mode
- * and restore it.
- */
- fmode = file->f_mode;
- /*
- * Check access permissions even for root. We don't want to be writing
- * to read-only registers. Access for regular users has already been
- * checked by the VFS layer.
- */
- if ((fmode & FMODE_WRITER) && !(inode->i_mode & S_IWUSR))
- return -EACCES;
-
- data = kmalloc(sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
- /*
- * We use the same file system operations for all our debug files. To
- * produce specific output, we look up the file name upon opening a
- * debugfs entry and map it to a memory offset. This offset is then used
- * in the generic "show" function to read a specific register.
- */
- data->entry = __find_debugfs_entry(file->f_path.dentry->d_iname);
- data->priv = inode->i_private;
-
- ret = single_open(file, brcm_avs_debug_show, data);
- if (ret)
- kfree(data);
- file->f_mode = fmode;
-
- return ret;
-}
-
-static int brcm_avs_debug_release(struct inode *inode, struct file *file)
-{
- struct seq_file *seq_priv = file->private_data;
- struct debugfs_data *data = seq_priv->private;
-
- kfree(data);
- return single_release(inode, file);
-}
-
-static const struct file_operations brcm_avs_debug_ops = {
- .open = brcm_avs_debug_open,
- .read = seq_read,
- .write = brcm_avs_seq_write,
- .llseek = seq_lseek,
- .release = brcm_avs_debug_release,
-};
-
-static void brcm_avs_cpufreq_debug_init(struct platform_device *pdev)
-{
- struct private_data *priv = platform_get_drvdata(pdev);
- struct dentry *dir;
- int i;
-
- if (!priv)
- return;
-
- dir = debugfs_create_dir(BRCM_AVS_CPUFREQ_NAME, NULL);
- if (IS_ERR_OR_NULL(dir))
- return;
- priv->debugfs = dir;
-
- for (i = 0; i < ARRAY_SIZE(debugfs_entries); i++) {
- /*
- * The DEBUGFS_ENTRY macro generates uppercase strings. We
- * convert them to lowercase before creating the debugfs
- * entries.
- */
- char *entry = __strtolower(debugfs_entries[i].name);
- fmode_t mode = debugfs_entries[i].mode;
-
- if (!debugfs_create_file(entry, S_IFREG | S_IRUGO | mode,
- dir, priv, &brcm_avs_debug_ops)) {
- priv->debugfs = NULL;
- debugfs_remove_recursive(dir);
- break;
- }
- }
-}
-
-static void brcm_avs_cpufreq_debug_exit(struct platform_device *pdev)
-{
- struct private_data *priv = platform_get_drvdata(pdev);
-
- if (priv && priv->debugfs) {
- debugfs_remove_recursive(priv->debugfs);
- priv->debugfs = NULL;
- }
-}
-
-#else
-
-static void brcm_avs_cpufreq_debug_init(struct platform_device *pdev) {}
-static void brcm_avs_cpufreq_debug_exit(struct platform_device *pdev) {}
-
-#endif /* CONFIG_ARM_BRCMSTB_AVS_CPUFREQ_DEBUG */
-
/*
* To ensure the right firmware is running we need to
* - check the MAGIC matches what we expect
@@ -1016,11 +700,8 @@ static int brcm_avs_cpufreq_probe(struct platform_device *pdev)
return ret;
brcm_avs_driver.driver_data = pdev;
- ret = cpufreq_register_driver(&brcm_avs_driver);
- if (!ret)
- brcm_avs_cpufreq_debug_init(pdev);
- return ret;
+ return cpufreq_register_driver(&brcm_avs_driver);
}
static int brcm_avs_cpufreq_remove(struct platform_device *pdev)
@@ -1032,8 +713,6 @@ static int brcm_avs_cpufreq_remove(struct platform_device *pdev)
if (ret)
return ret;
- brcm_avs_cpufreq_debug_exit(pdev);
-
priv = platform_get_drvdata(pdev);
iounmap(priv->base);
iounmap(priv->avs_intr_base);
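
The MANT()/FRAC() macros in the debugfs code removed above split a raw milli-unit register value into an integer and a fractional part for display. A minimal userspace sketch of that formatting, assuming a made-up sample value (only the macro shapes come from the driver):

#include <stdio.h>
#include <stdlib.h>

/* Split a milli-unit value (e.g. millivolts) into "X.YYY" for printing. */
#define MANT(x) ((unsigned int)(abs(x) / 1000))
#define FRAC(x) ((unsigned int)(abs(x) - abs(x) / 1000 * 1000))

int main(void)
{
    int mv = 912345;    /* made-up register reading: 912.345 */

    printf("%u.%03u\n", MANT(mv), FRAC(mv));
    return 0;
}
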
diff --git a/drivers/dax/device.c b/drivers/dax/device.c
index be8606457f27..aff2c1594220 100644
--- a/drivers/dax/device.c
+++ b/drivers/dax/device.c
@@ -19,6 +19,7 @@
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/mm.h>
+#include <linux/mman.h>
#include "dax-private.h"
#include "dax.h"
@@ -540,6 +541,7 @@ static const struct file_operations dax_fops = {
.release = dax_release,
.get_unmapped_area = dax_get_unmapped_area,
.mmap = dax_mmap,
+ .mmap_supported_flags = MAP_SYNC,
};
static void dev_dax_release(struct device *dev)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
index f6cb502c303f..25f064c01038 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
@@ -138,13 +138,6 @@ int amdgpu_dm_set_regamma_lut(struct dm_crtc_state *crtc)
lut = (struct drm_color_lut *)blob->data;
lut_size = blob->length / sizeof(struct drm_color_lut);
- if (__is_lut_linear(lut, lut_size)) {
- /* Set to bypass if lut is set to linear */
- stream->out_transfer_func->type = TF_TYPE_BYPASS;
- stream->out_transfer_func->tf = TRANSFER_FUNCTION_LINEAR;
- return 0;
- }
-
gamma = dc_create_gamma();
if (!gamma)
return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index add90675fd2a..26fbeafc3c96 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -4743,23 +4743,27 @@ static void smu7_check_dpm_table_updated(struct pp_hwmgr *hwmgr)
for (i=0; i < dep_table->count; i++) {
if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
- data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC;
- break;
+ data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_MCLK;
+ return;
}
}
- if (i == dep_table->count)
+ if (i == dep_table->count && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
data->need_update_smu7_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC;
+ data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
+ }
dep_table = table_info->vdd_dep_on_sclk;
odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk);
for (i=0; i < dep_table->count; i++) {
if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
- data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC;
- break;
+ data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_SCLK;
+ return;
}
}
- if (i == dep_table->count)
+ if (i == dep_table->count && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
data->need_update_smu7_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC;
+ data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
+ }
}
static int smu7_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
diff --git a/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h
index fb696e3d06cf..2f8a3b983cce 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h
@@ -412,8 +412,10 @@ typedef struct {
QuadraticInt_t ReservedEquation2;
QuadraticInt_t ReservedEquation3;
+ uint16_t MinVoltageUlvGfx;
+ uint16_t MinVoltageUlvSoc;
- uint32_t Reserved[15];
+ uint32_t Reserved[14];
diff --git a/drivers/gpu/drm/drm_dp_dual_mode_helper.c b/drivers/gpu/drm/drm_dp_dual_mode_helper.c
index 02a50929af67..e7f4fe2848a5 100644
--- a/drivers/gpu/drm/drm_dp_dual_mode_helper.c
+++ b/drivers/gpu/drm/drm_dp_dual_mode_helper.c
@@ -350,19 +350,44 @@ int drm_dp_dual_mode_set_tmds_output(enum drm_dp_dual_mode_type type,
{
uint8_t tmds_oen = enable ? 0 : DP_DUAL_MODE_TMDS_DISABLE;
ssize_t ret;
+ int retry;
if (type < DRM_DP_DUAL_MODE_TYPE2_DVI)
return 0;
- ret = drm_dp_dual_mode_write(adapter, DP_DUAL_MODE_TMDS_OEN,
- &tmds_oen, sizeof(tmds_oen));
- if (ret) {
- DRM_DEBUG_KMS("Failed to %s TMDS output buffers\n",
- enable ? "enable" : "disable");
- return ret;
+ /*
+ * LSPCON adapters in low-power state may ignore the first write, so
+ * read back and verify the written value a few times.
+ */
+ for (retry = 0; retry < 3; retry++) {
+ uint8_t tmp;
+
+ ret = drm_dp_dual_mode_write(adapter, DP_DUAL_MODE_TMDS_OEN,
+ &tmds_oen, sizeof(tmds_oen));
+ if (ret) {
+ DRM_DEBUG_KMS("Failed to %s TMDS output buffers (%d attempts)\n",
+ enable ? "enable" : "disable",
+ retry + 1);
+ return ret;
+ }
+
+ ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_TMDS_OEN,
+ &tmp, sizeof(tmp));
+ if (ret) {
+ DRM_DEBUG_KMS("I2C read failed during TMDS output buffer %s (%d attempts)\n",
+ enable ? "enabling" : "disabling",
+ retry + 1);
+ return ret;
+ }
+
+ if (tmp == tmds_oen)
+ return 0;
}
- return 0;
+ DRM_DEBUG_KMS("I2C write value mismatch during TMDS output buffer %s\n",
+ enable ? "enabling" : "disabling");
+
+ return -EIO;
}
EXPORT_SYMBOL(drm_dp_dual_mode_set_tmds_output);
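
The change above turns a fire-and-forget register write into a write/read-back/verify loop so an LSPCON adapter dozing in a low-power state cannot silently drop the first write. A minimal userspace sketch of the pattern, with a mocked register standing in for the DRM dual-mode helpers:

#include <stdio.h>
#include <stdint.h>

static uint8_t mock_reg;
static int flaky = 1;    /* ignore the first write, like a sleepy adapter */

static int reg_write(uint8_t val)
{
    if (flaky-- > 0)
        return 0;    /* reports success but the device ignored it */
    mock_reg = val;
    return 0;
}

static int reg_read(uint8_t *val)
{
    *val = mock_reg;
    return 0;
}

/* Write @val, read it back, and retry a few times before giving up. */
static int write_verified(uint8_t val)
{
    for (int retry = 0; retry < 3; retry++) {
        uint8_t tmp;

        if (reg_write(val))
            return -1;
        if (reg_read(&tmp))
            return -1;
        if (tmp == val)
            return 0;
    }
    return -1;    /* persistent mismatch: treat as an I/O error */
}

int main(void)
{
    mock_reg = 0xff;
    printf("write_verified: %d, reg=0x%02x\n",
           write_verified(0x01), (unsigned)mock_reg);
    return 0;
}

Bounding the loop at a small retry count keeps a genuinely broken adapter from wedging the caller; the final mismatch is reported as an error, as the helper above does with -EIO.
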
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 0faaf829f5bf..f0e79178bde6 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -18,6 +18,7 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <uapi/drm/exynos_drm.h>
#include "exynos_drm_drv.h"
@@ -26,20 +27,6 @@
#include "exynos_drm_iommu.h"
#include "exynos_drm_crtc.h"
-#define to_exynos_fb(x) container_of(x, struct exynos_drm_fb, fb)
-
-/*
- * exynos specific framebuffer structure.
- *
- * @fb: drm framebuffer obejct.
- * @exynos_gem: array of exynos specific gem object containing a gem object.
- */
-struct exynos_drm_fb {
- struct drm_framebuffer fb;
- struct exynos_drm_gem *exynos_gem[MAX_FB_BUFFER];
- dma_addr_t dma_addr[MAX_FB_BUFFER];
-};
-
static int check_fb_gem_memory_type(struct drm_device *drm_dev,
struct exynos_drm_gem *exynos_gem)
{
@@ -66,40 +53,9 @@ static int check_fb_gem_memory_type(struct drm_device *drm_dev,
return 0;
}
-static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
-{
- struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
- unsigned int i;
-
- drm_framebuffer_cleanup(fb);
-
- for (i = 0; i < ARRAY_SIZE(exynos_fb->exynos_gem); i++) {
- struct drm_gem_object *obj;
-
- if (exynos_fb->exynos_gem[i] == NULL)
- continue;
-
- obj = &exynos_fb->exynos_gem[i]->base;
- drm_gem_object_unreference_unlocked(obj);
- }
-
- kfree(exynos_fb);
- exynos_fb = NULL;
-}
-
-static int exynos_drm_fb_create_handle(struct drm_framebuffer *fb,
- struct drm_file *file_priv,
- unsigned int *handle)
-{
- struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
-
- return drm_gem_handle_create(file_priv,
- &exynos_fb->exynos_gem[0]->base, handle);
-}
-
static const struct drm_framebuffer_funcs exynos_drm_fb_funcs = {
- .destroy = exynos_drm_fb_destroy,
- .create_handle = exynos_drm_fb_create_handle,
+ .destroy = drm_gem_fb_destroy,
+ .create_handle = drm_gem_fb_create_handle,
};
struct drm_framebuffer *
@@ -108,12 +64,12 @@ exynos_drm_framebuffer_init(struct drm_device *dev,
struct exynos_drm_gem **exynos_gem,
int count)
{
- struct exynos_drm_fb *exynos_fb;
+ struct drm_framebuffer *fb;
int i;
int ret;
- exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL);
- if (!exynos_fb)
+ fb = kzalloc(sizeof(*fb), GFP_KERNEL);
+ if (!fb)
return ERR_PTR(-ENOMEM);
for (i = 0; i < count; i++) {
@@ -121,23 +77,21 @@ exynos_drm_framebuffer_init(struct drm_device *dev,
if (ret < 0)
goto err;
- exynos_fb->exynos_gem[i] = exynos_gem[i];
- exynos_fb->dma_addr[i] = exynos_gem[i]->dma_addr
- + mode_cmd->offsets[i];
+ fb->obj[i] = &exynos_gem[i]->base;
}
- drm_helper_mode_fill_fb_struct(dev, &exynos_fb->fb, mode_cmd);
+ drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
- ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs);
+ ret = drm_framebuffer_init(dev, fb, &exynos_drm_fb_funcs);
if (ret < 0) {
DRM_ERROR("failed to initialize framebuffer\n");
goto err;
}
- return &exynos_fb->fb;
+ return fb;
err:
- kfree(exynos_fb);
+ kfree(fb);
return ERR_PTR(ret);
}
@@ -191,12 +145,13 @@ err:
dma_addr_t exynos_drm_fb_dma_addr(struct drm_framebuffer *fb, int index)
{
- struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
+ struct exynos_drm_gem *exynos_gem;
if (WARN_ON_ONCE(index >= MAX_FB_BUFFER))
return 0;
- return exynos_fb->dma_addr[index];
+ exynos_gem = to_exynos_gem(fb->obj[index]);
+ return exynos_gem->dma_addr + fb->offsets[index];
}
static struct drm_mode_config_helper_funcs exynos_drm_mode_config_helpers = {
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index db6b94dda5df..d85939bd7b47 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -1080,6 +1080,7 @@ static int cmd_handler_mi_user_interrupt(struct parser_exec_state *s)
{
set_bit(cmd_interrupt_events[s->ring_id].mi_user_interrupt,
s->workload->pending_events);
+ patch_value(s, cmd_ptr(s, 0), MI_NOOP);
return 0;
}
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index dd96ffc878ac..6d8180e8d1e2 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -169,6 +169,8 @@ static u8 dpcd_fix_data[DPCD_HEADER_SIZE] = {
static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ int pipe;
+
vgpu_vreg_t(vgpu, SDEISR) &= ~(SDE_PORTB_HOTPLUG_CPT |
SDE_PORTC_HOTPLUG_CPT |
SDE_PORTD_HOTPLUG_CPT);
@@ -267,6 +269,14 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
if (IS_BROADWELL(dev_priv))
vgpu_vreg_t(vgpu, PCH_ADPA) &= ~ADPA_CRT_HOTPLUG_MONITOR_MASK;
+ /* Disable Primary/Sprite/Cursor plane */
+ for_each_pipe(dev_priv, pipe) {
+ vgpu_vreg_t(vgpu, DSPCNTR(pipe)) &= ~DISPLAY_PLANE_ENABLE;
+ vgpu_vreg_t(vgpu, SPRCTL(pipe)) &= ~SPRITE_ENABLE;
+ vgpu_vreg_t(vgpu, CURCNTR(pipe)) &= ~CURSOR_MODE;
+ vgpu_vreg_t(vgpu, CURCNTR(pipe)) |= CURSOR_MODE_DISABLE;
+ }
+
vgpu_vreg_t(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE;
}
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
index b555eb26f9ce..6f4f8e941fc2 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -323,6 +323,7 @@ static void update_fb_info(struct vfio_device_gfx_plane_info *gvt_dmabuf,
struct intel_vgpu_fb_info *fb_info)
{
gvt_dmabuf->drm_format = fb_info->drm_format;
+ gvt_dmabuf->drm_format_mod = fb_info->drm_format_mod;
gvt_dmabuf->width = fb_info->width;
gvt_dmabuf->height = fb_info->height;
gvt_dmabuf->stride = fb_info->stride;
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c
index 6b50fe78dc1b..1c120683e958 100644
--- a/drivers/gpu/drm/i915/gvt/fb_decoder.c
+++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c
@@ -245,16 +245,13 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
plane->hw_format = fmt;
plane->base = vgpu_vreg_t(vgpu, DSPSURF(pipe)) & I915_GTT_PAGE_MASK;
- if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) {
- gvt_vgpu_err("invalid gma address: %lx\n",
- (unsigned long)plane->base);
+ if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0))
return -EINVAL;
- }
plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base);
if (plane->base_gpa == INTEL_GVT_INVALID_ADDR) {
- gvt_vgpu_err("invalid gma address: %lx\n",
- (unsigned long)plane->base);
+ gvt_vgpu_err("Translate primary plane gma 0x%x to gpa fail\n",
+ plane->base);
return -EINVAL;
}
@@ -371,16 +368,13 @@ int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu,
alpha_plane, alpha_force);
plane->base = vgpu_vreg_t(vgpu, CURBASE(pipe)) & I915_GTT_PAGE_MASK;
- if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) {
- gvt_vgpu_err("invalid gma address: %lx\n",
- (unsigned long)plane->base);
+ if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0))
return -EINVAL;
- }
plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base);
if (plane->base_gpa == INTEL_GVT_INVALID_ADDR) {
- gvt_vgpu_err("invalid gma address: %lx\n",
- (unsigned long)plane->base);
+ gvt_vgpu_err("Translate cursor plane gma 0x%x to gpa fail\n",
+ plane->base);
return -EINVAL;
}
@@ -476,16 +470,13 @@ int intel_vgpu_decode_sprite_plane(struct intel_vgpu *vgpu,
plane->drm_format = drm_format;
plane->base = vgpu_vreg_t(vgpu, SPRSURF(pipe)) & I915_GTT_PAGE_MASK;
- if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) {
- gvt_vgpu_err("invalid gma address: %lx\n",
- (unsigned long)plane->base);
+ if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0))
return -EINVAL;
- }
plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base);
if (plane->base_gpa == INTEL_GVT_INVALID_ADDR) {
- gvt_vgpu_err("invalid gma address: %lx\n",
- (unsigned long)plane->base);
+ gvt_vgpu_err("Translate sprite plane gma 0x%x to gpa fail\n",
+ plane->base);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index d29281231507..78e55aafc8bc 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -530,6 +530,16 @@ static void ggtt_set_guest_entry(struct intel_vgpu_mm *mm,
false, 0, mm->vgpu);
}
+static void ggtt_get_host_entry(struct intel_vgpu_mm *mm,
+ struct intel_gvt_gtt_entry *entry, unsigned long index)
+{
+ struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
+
+ GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);
+
+ pte_ops->get_entry(NULL, entry, index, false, 0, mm->vgpu);
+}
+
static void ggtt_set_host_entry(struct intel_vgpu_mm *mm,
struct intel_gvt_gtt_entry *entry, unsigned long index)
{
@@ -1818,6 +1828,18 @@ int intel_vgpu_emulate_ggtt_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
return ret;
}
+static void ggtt_invalidate_pte(struct intel_vgpu *vgpu,
+ struct intel_gvt_gtt_entry *entry)
+{
+ struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
+ unsigned long pfn;
+
+ pfn = pte_ops->get_pfn(entry);
+ if (pfn != vgpu->gvt->gtt.scratch_mfn)
+ intel_gvt_hypervisor_dma_unmap_guest_page(vgpu,
+ pfn << PAGE_SHIFT);
+}
+
static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
void *p_data, unsigned int bytes)
{
@@ -1844,10 +1866,10 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data,
bytes);
- m = e;
if (ops->test_present(&e)) {
gfn = ops->get_pfn(&e);
+ m = e;
/* one PTE update may be issued in multiple writes and the
* first write may not construct a valid gfn
@@ -1868,8 +1890,12 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
ops->set_pfn(&m, gvt->gtt.scratch_mfn);
} else
ops->set_pfn(&m, dma_addr >> PAGE_SHIFT);
- } else
+ } else {
+ ggtt_get_host_entry(ggtt_mm, &m, g_gtt_index);
+ ggtt_invalidate_pte(vgpu, &m);
ops->set_pfn(&m, gvt->gtt.scratch_mfn);
+ ops->clear_present(&m);
+ }
out:
ggtt_set_host_entry(ggtt_mm, &m, g_gtt_index);
@@ -2030,7 +2056,7 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
return PTR_ERR(gtt->ggtt_mm);
}
- intel_vgpu_reset_ggtt(vgpu);
+ intel_vgpu_reset_ggtt(vgpu, false);
return create_scratch_page_tree(vgpu);
}
@@ -2315,17 +2341,19 @@ void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu)
/**
* intel_vgpu_reset_ggtt - reset the GGTT entry
* @vgpu: a vGPU
+ * @invalidate_old: invalidate old entries
*
* This function is called at the vGPU create stage
* to reset all the GGTT entries.
*
*/
-void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
+void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old)
{
struct intel_gvt *gvt = vgpu->gvt;
struct drm_i915_private *dev_priv = gvt->dev_priv;
struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
struct intel_gvt_gtt_entry entry = {.type = GTT_TYPE_GGTT_PTE};
+ struct intel_gvt_gtt_entry old_entry;
u32 index;
u32 num_entries;
@@ -2334,13 +2362,23 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
index = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT;
num_entries = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT;
- while (num_entries--)
+ while (num_entries--) {
+ if (invalidate_old) {
+ ggtt_get_host_entry(vgpu->gtt.ggtt_mm, &old_entry, index);
+ ggtt_invalidate_pte(vgpu, &old_entry);
+ }
ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++);
+ }
index = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT;
num_entries = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT;
- while (num_entries--)
+ while (num_entries--) {
+ if (invalidate_old) {
+ ggtt_get_host_entry(vgpu->gtt.ggtt_mm, &old_entry, index);
+ ggtt_invalidate_pte(vgpu, &old_entry);
+ }
ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++);
+ }
ggtt_invalidate(dev_priv);
}
@@ -2360,5 +2398,5 @@ void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu)
* removing the shadow pages.
*/
intel_vgpu_destroy_all_ppgtt_mm(vgpu);
- intel_vgpu_reset_ggtt(vgpu);
+ intel_vgpu_reset_ggtt(vgpu, true);
}
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
index a8b369cd352b..3792f2b7f4ff 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -193,7 +193,7 @@ struct intel_vgpu_gtt {
extern int intel_vgpu_init_gtt(struct intel_vgpu *vgpu);
extern void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu);
-void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu);
+void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old);
void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu);
extern int intel_gvt_init_gtt(struct intel_gvt *gvt);
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 8c5d5d005854..a33c1c3e4a21 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -1150,6 +1150,7 @@ static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification)
switch (notification) {
case VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE:
root_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY;
+ /* fall through */
case VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE:
mm = intel_vgpu_get_ppgtt_mm(vgpu, root_entry_type, pdps);
return PTR_ERR_OR_ZERO(mm);
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index c16a492449d7..1466d8769ec9 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -1301,7 +1301,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
}
- return 0;
+ return -ENOTTY;
}
static ssize_t
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 84ca369f15a5..3b4daafebdcb 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1105,30 +1105,32 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
ret = i915_ggtt_probe_hw(dev_priv);
if (ret)
- return ret;
+ goto err_perf;
- /* WARNING: Apparently we must kick fbdev drivers before vgacon,
- * otherwise the vga fbdev driver falls over. */
+ /*
+ * WARNING: Apparently we must kick fbdev drivers before vgacon,
+ * otherwise the vga fbdev driver falls over.
+ */
ret = i915_kick_out_firmware_fb(dev_priv);
if (ret) {
DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
- goto out_ggtt;
+ goto err_ggtt;
}
ret = i915_kick_out_vgacon(dev_priv);
if (ret) {
DRM_ERROR("failed to remove conflicting VGA console\n");
- goto out_ggtt;
+ goto err_ggtt;
}
ret = i915_ggtt_init_hw(dev_priv);
if (ret)
- return ret;
+ goto err_ggtt;
ret = i915_ggtt_enable_hw(dev_priv);
if (ret) {
DRM_ERROR("failed to enable GGTT\n");
- goto out_ggtt;
+ goto err_ggtt;
}
pci_set_master(pdev);
@@ -1139,7 +1141,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
if (ret) {
DRM_ERROR("failed to set DMA mask\n");
- goto out_ggtt;
+ goto err_ggtt;
}
}
@@ -1157,7 +1159,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
if (ret) {
DRM_ERROR("failed to set DMA mask\n");
- goto out_ggtt;
+ goto err_ggtt;
}
}
@@ -1190,13 +1192,14 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
ret = intel_gvt_init(dev_priv);
if (ret)
- goto out_ggtt;
+ goto err_ggtt;
return 0;
-out_ggtt:
+err_ggtt:
i915_ggtt_cleanup_hw(dev_priv);
-
+err_perf:
+ i915_perf_fini(dev_priv);
return ret;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 8c170db8495d..0414228cd2b5 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -728,7 +728,7 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
err = radix_tree_insert(handles_vma, handle, vma);
if (unlikely(err)) {
- kfree(lut);
+ kmem_cache_free(eb->i915->luts, lut);
goto err_obj;
}
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index d8feb9053e0c..f0519e31543a 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -473,20 +473,37 @@ static u64 get_rc6(struct drm_i915_private *i915)
spin_lock_irqsave(&i915->pmu.lock, flags);
spin_lock(&kdev->power.lock);
- if (!i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur)
- i915->pmu.suspended_jiffies_last =
- kdev->power.suspended_jiffies;
+ /*
+ * Since intel_runtime_pm_get_if_in_use failed above to get the
+ * runtime PM reference, we cannot assume we are in runtime
+ * suspend: we can either a) race with coming out of it before we
+ * took the power.lock, or b) be in a state other than suspended
+ * which can bring us here.
+ *
+ * We need to double-check that we are indeed currently runtime
+ * suspended and if not we cannot do better than report the last
+ * known RC6 value.
+ */
+ if (kdev->power.runtime_status == RPM_SUSPENDED) {
+ if (!i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur)
+ i915->pmu.suspended_jiffies_last =
+ kdev->power.suspended_jiffies;
- val = kdev->power.suspended_jiffies -
- i915->pmu.suspended_jiffies_last;
- val += jiffies - kdev->power.accounting_timestamp;
+ val = kdev->power.suspended_jiffies -
+ i915->pmu.suspended_jiffies_last;
+ val += jiffies - kdev->power.accounting_timestamp;
- spin_unlock(&kdev->power.lock);
+ val = jiffies_to_nsecs(val);
+ val += i915->pmu.sample[__I915_SAMPLE_RC6].cur;
- val = jiffies_to_nsecs(val);
- val += i915->pmu.sample[__I915_SAMPLE_RC6].cur;
- i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val;
+ i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val;
+ } else if (i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) {
+ val = i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur;
+ } else {
+ val = i915->pmu.sample[__I915_SAMPLE_RC6].cur;
+ }
+ spin_unlock(&kdev->power.lock);
spin_unlock_irqrestore(&i915->pmu.lock, flags);
}
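
The reworked path above only advances the RC6 estimate while the device is verifiably runtime-suspended and otherwise returns the best value already on record. A loose userspace sketch of that estimate-or-last-known fallback (the struct and the suspended flag are illustrative, not the i915 types):

#include <stdio.h>

struct counter {
    unsigned long long live;         /* last value read from hardware */
    unsigned long long estimated;    /* estimate built while suspended */
};

/* Return the best counter value that can be justified right now. */
static unsigned long long read_counter(struct counter *c, int suspended,
                                       unsigned long long suspended_ticks)
{
    if (suspended) {
        /* Device is provably idle: extend the estimate. */
        c->estimated = c->live + suspended_ticks;
        return c->estimated;
    }
    /* Not verifiably suspended: report the last estimate if one exists. */
    return c->estimated ? c->estimated : c->live;
}

int main(void)
{
    struct counter c = { .live = 100 };

    printf("%llu\n", read_counter(&c, 1, 25));    /* 125: fresh estimate */
    printf("%llu\n", read_counter(&c, 0, 0));     /* 125: last estimate */
    return 0;
}
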
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index 709d6ca68074..3ea566f99450 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -729,7 +729,7 @@ static void i915_audio_component_codec_wake_override(struct device *kdev,
struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
u32 tmp;
- if (!IS_GEN9_BC(dev_priv))
+ if (!IS_GEN9(dev_priv))
return;
i915_audio_component_get_power(kdev);
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index c5c7530ba157..447b721c3be9 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -1256,7 +1256,6 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
return;
aux_channel = child->aux_channel;
- ddc_pin = child->ddc_pin;
is_dvi = child->device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING;
is_dp = child->device_type & DEVICE_TYPE_DISPLAYPORT_OUTPUT;
@@ -1303,9 +1302,15 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
DRM_DEBUG_KMS("Port %c is internal DP\n", port_name(port));
if (is_dvi) {
- info->alternate_ddc_pin = map_ddc_pin(dev_priv, ddc_pin);
-
- sanitize_ddc_pin(dev_priv, port);
+ ddc_pin = map_ddc_pin(dev_priv, child->ddc_pin);
+ if (intel_gmbus_is_valid_pin(dev_priv, ddc_pin)) {
+ info->alternate_ddc_pin = ddc_pin;
+ sanitize_ddc_pin(dev_priv, port);
+ } else {
+ DRM_DEBUG_KMS("Port %c has invalid DDC pin %d, "
+ "sticking to defaults\n",
+ port_name(port), ddc_pin);
+ }
}
if (is_dp) {
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 697af5add78b..e3a5f673ff67 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -577,6 +577,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* know the next preemption status we see corresponds
* to this ELSP update.
*/
+ GEM_BUG_ON(!execlists_is_active(execlists,
+ EXECLISTS_ACTIVE_USER));
GEM_BUG_ON(!port_count(&port[0]));
if (port_count(&port[0]) > 1)
goto unlock;
@@ -738,6 +740,8 @@ execlists_cancel_port_requests(struct intel_engine_execlists * const execlists)
memset(port, 0, sizeof(*port));
port++;
}
+
+ execlists_clear_active(execlists, EXECLISTS_ACTIVE_USER);
}
static void execlists_cancel_requests(struct intel_engine_cs *engine)
@@ -1001,6 +1005,11 @@ static void execlists_submission_tasklet(unsigned long data)
if (fw)
intel_uncore_forcewake_put(dev_priv, execlists->fw_domains);
+
+ /* If the engine is now idle, so should the flag be; and vice versa. */
+ GEM_BUG_ON(execlists_is_active(&engine->execlists,
+ EXECLISTS_ACTIVE_USER) ==
+ !port_isset(engine->execlists.port));
}
static void queue_request(struct intel_engine_cs *engine,
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index 2decc8e2c79f..add9cc97a3b6 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -195,6 +195,7 @@ static void vc4_bo_destroy(struct vc4_bo *bo)
vc4_bo_set_label(obj, -1);
if (bo->validated_shader) {
+ kfree(bo->validated_shader->uniform_addr_offsets);
kfree(bo->validated_shader->texture_samples);
kfree(bo->validated_shader);
bo->validated_shader = NULL;
@@ -591,6 +592,7 @@ void vc4_free_object(struct drm_gem_object *gem_bo)
}
if (bo->validated_shader) {
+ kfree(bo->validated_shader->uniform_addr_offsets);
kfree(bo->validated_shader->texture_samples);
kfree(bo->validated_shader);
bo->validated_shader = NULL;
diff --git a/drivers/gpu/drm/vc4/vc4_validate_shaders.c b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
index d3f15bf60900..7cf82b071de2 100644
--- a/drivers/gpu/drm/vc4/vc4_validate_shaders.c
+++ b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
@@ -942,6 +942,7 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
fail:
kfree(validation_state.branch_targets);
if (validated_shader) {
+ kfree(validated_shader->uniform_addr_offsets);
kfree(validated_shader->texture_samples);
kfree(validated_shader);
}
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 5a3a7ead3012..0b5cc910f62e 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -525,6 +525,9 @@
#define I2C_VENDOR_ID_HANTICK 0x0911
#define I2C_PRODUCT_ID_HANTICK_5288 0x5288
+#define I2C_VENDOR_ID_RAYD 0x2386
+#define I2C_PRODUCT_ID_RAYD_3118 0x3118
+
#define USB_VENDOR_ID_HANWANG 0x0b57
#define USB_DEVICE_ID_HANWANG_TABLET_FIRST 0x5000
#define USB_DEVICE_ID_HANWANG_TABLET_LAST 0x8fff
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 6836a856c243..930652c25120 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -387,7 +387,8 @@ static int hidinput_get_battery_property(struct power_supply *psy,
break;
case POWER_SUPPLY_PROP_CAPACITY:
- if (dev->battery_report_type == HID_FEATURE_REPORT) {
+ if (dev->battery_status != HID_BATTERY_REPORTED &&
+ !dev->battery_avoid_query) {
value = hidinput_query_battery_capacity(dev);
if (value < 0)
return value;
@@ -403,17 +404,17 @@ static int hidinput_get_battery_property(struct power_supply *psy,
break;
case POWER_SUPPLY_PROP_STATUS:
- if (!dev->battery_reported &&
- dev->battery_report_type == HID_FEATURE_REPORT) {
+ if (dev->battery_status != HID_BATTERY_REPORTED &&
+ !dev->battery_avoid_query) {
value = hidinput_query_battery_capacity(dev);
if (value < 0)
return value;
dev->battery_capacity = value;
- dev->battery_reported = true;
+ dev->battery_status = HID_BATTERY_QUERIED;
}
- if (!dev->battery_reported)
+ if (dev->battery_status == HID_BATTERY_UNKNOWN)
val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
else if (dev->battery_capacity == 100)
val->intval = POWER_SUPPLY_STATUS_FULL;
@@ -486,6 +487,14 @@ static int hidinput_setup_battery(struct hid_device *dev, unsigned report_type,
dev->battery_report_type = report_type;
dev->battery_report_id = field->report->id;
+ /*
+ * The stylus is normally not connected to the device, so we
+ * cannot query the device for a meaningful battery strength.
+ * We have to wait for the device to report it on its own.
+ */
+ dev->battery_avoid_query = report_type == HID_INPUT_REPORT &&
+ field->physical == HID_DG_STYLUS;
+
dev->battery = power_supply_register(&dev->dev, psy_desc, &psy_cfg);
if (IS_ERR(dev->battery)) {
error = PTR_ERR(dev->battery);
@@ -530,9 +539,10 @@ static void hidinput_update_battery(struct hid_device *dev, int value)
capacity = hidinput_scale_battery_capacity(dev, value);
- if (!dev->battery_reported || capacity != dev->battery_capacity) {
+ if (dev->battery_status != HID_BATTERY_REPORTED ||
+ capacity != dev->battery_capacity) {
dev->battery_capacity = capacity;
- dev->battery_reported = true;
+ dev->battery_status = HID_BATTERY_REPORTED;
power_supply_changed(dev->battery);
}
}
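
The change above replaces a boolean battery_reported flag with a three-way status so a value the host actively queried is never mistaken for one the device pushed. A compact userspace sketch of the tri-state pattern (names and the stand-in capacity are illustrative):

#include <stdio.h>

enum battery_status {
    BATTERY_UNKNOWN = 0,    /* nothing known yet */
    BATTERY_QUERIED,        /* host asked once; value may be stale */
    BATTERY_REPORTED,       /* device pushed a fresh value */
};

struct battery {
    enum battery_status status;
    int capacity;
    int avoid_query;    /* e.g. detachable stylus: querying is useless */
};

static int get_capacity(struct battery *b)
{
    /* Query only if the device never reported and querying makes sense. */
    if (b->status != BATTERY_REPORTED && !b->avoid_query) {
        b->capacity = 57;    /* stand-in for a real query */
        b->status = BATTERY_QUERIED;
    }
    return b->capacity;
}

static void device_reported(struct battery *b, int capacity)
{
    if (b->status != BATTERY_REPORTED || capacity != b->capacity) {
        b->capacity = capacity;
        b->status = BATTERY_REPORTED;    /* pushes outrank queries */
    }
}

int main(void)
{
    struct battery b = { 0 };

    printf("capacity=%d status=%d\n", get_capacity(&b), b.status);
    device_reported(&b, 80);
    printf("capacity=%d status=%d\n", get_capacity(&b), b.status);
    return 0;
}
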
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index fbfcc8009432..b39844adea47 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -192,6 +192,11 @@ static ssize_t hidraw_get_report(struct file *file, char __user *buffer, size_t
int ret = 0, len;
unsigned char report_number;
+ if (!hidraw_table[minor] || !hidraw_table[minor]->exist) {
+ ret = -ENODEV;
+ goto out;
+ }
+
dev = hidraw_table[minor]->hid;
if (!dev->ll_driver->raw_request) {
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index 97689e98e53f..963328674e93 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -47,6 +47,7 @@
/* quirks to control the device */
#define I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV BIT(0)
#define I2C_HID_QUIRK_NO_IRQ_AFTER_RESET BIT(1)
+#define I2C_HID_QUIRK_RESEND_REPORT_DESCR BIT(2)
/* flags */
#define I2C_HID_STARTED 0
@@ -171,6 +172,8 @@ static const struct i2c_hid_quirks {
I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV },
{ I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288,
I2C_HID_QUIRK_NO_IRQ_AFTER_RESET },
+ { I2C_VENDOR_ID_RAYD, I2C_PRODUCT_ID_RAYD_3118,
+ I2C_HID_QUIRK_RESEND_REPORT_DESCR },
{ 0, 0 }
};
@@ -1220,6 +1223,16 @@ static int i2c_hid_resume(struct device *dev)
if (ret)
return ret;
+ /* RAYDIUM device (2386:3118) needs to re-send the report
+ * descriptor command after resume; otherwise it issues too
+ * many incomplete reports.
+ */
+ if (ihid->quirks & I2C_HID_QUIRK_RESEND_REPORT_DESCR) {
+ ret = i2c_hid_command(client, &hid_report_descr_cmd, NULL, 0);
+ if (ret)
+ return ret;
+ }
+
if (hid->driver && hid->driver->reset_resume) {
ret = hid->driver->reset_resume(hid);
return ret;
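
The RAYDIUM quirk above follows the driver's usual shape: a static vendor/product table scanned at probe time, terminated by a zero sentinel. A minimal userspace sketch of that lookup (the IDs mirror the ones added above; the flag name is illustrative):

#include <stdio.h>
#include <stdint.h>

#define QUIRK_RESEND_DESCR (1u << 0)

struct quirk_entry {
    uint16_t vendor;
    uint16_t product;
    uint32_t quirks;
};

static const struct quirk_entry quirk_table[] = {
    { 0x2386, 0x3118, QUIRK_RESEND_DESCR },
    { 0, 0, 0 }    /* sentinel terminates the scan */
};

static uint32_t lookup_quirks(uint16_t vendor, uint16_t product)
{
    const struct quirk_entry *q;

    for (q = quirk_table; q->vendor || q->product; q++)
        if (q->vendor == vendor && q->product == product)
            return q->quirks;
    return 0;
}

int main(void)
{
    printf("quirks=0x%x\n", (unsigned)lookup_quirks(0x2386, 0x3118));
    return 0;
}
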
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 6da16a879c9f..5f947ec20dcb 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -689,6 +689,45 @@ static int wacom_intuos_get_tool_type(int tool_id)
return tool_type;
}
+static void wacom_exit_report(struct wacom_wac *wacom)
+{
+ struct input_dev *input = wacom->pen_input;
+ struct wacom_features *features = &wacom->features;
+ unsigned char *data = wacom->data;
+ int idx = (features->type == INTUOS) ? (data[1] & 0x01) : 0;
+
+ /*
+ * Reset all state; otherwise we lose the initial state the
+ * next time the tool comes into proximity.
+ */
+ input_report_abs(input, ABS_X, 0);
+ input_report_abs(input, ABS_Y, 0);
+ input_report_abs(input, ABS_DISTANCE, 0);
+ input_report_abs(input, ABS_TILT_X, 0);
+ input_report_abs(input, ABS_TILT_Y, 0);
+ if (wacom->tool[idx] >= BTN_TOOL_MOUSE) {
+ input_report_key(input, BTN_LEFT, 0);
+ input_report_key(input, BTN_MIDDLE, 0);
+ input_report_key(input, BTN_RIGHT, 0);
+ input_report_key(input, BTN_SIDE, 0);
+ input_report_key(input, BTN_EXTRA, 0);
+ input_report_abs(input, ABS_THROTTLE, 0);
+ input_report_abs(input, ABS_RZ, 0);
+ } else {
+ input_report_abs(input, ABS_PRESSURE, 0);
+ input_report_key(input, BTN_STYLUS, 0);
+ input_report_key(input, BTN_STYLUS2, 0);
+ input_report_key(input, BTN_TOUCH, 0);
+ input_report_abs(input, ABS_WHEEL, 0);
+ if (features->type >= INTUOS3S)
+ input_report_abs(input, ABS_Z, 0);
+ }
+ input_report_key(input, wacom->tool[idx], 0);
+ input_report_abs(input, ABS_MISC, 0); /* reset tool id */
+ input_event(input, EV_MSC, MSC_SERIAL, wacom->serial[idx]);
+ wacom->id[idx] = 0;
+}
+
static int wacom_intuos_inout(struct wacom_wac *wacom)
{
struct wacom_features *features = &wacom->features;
@@ -741,36 +780,7 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
if (!wacom->id[idx])
return 1;
- /*
- * Reset all states otherwise we lose the initial states
- * when in-prox next time
- */
- input_report_abs(input, ABS_X, 0);
- input_report_abs(input, ABS_Y, 0);
- input_report_abs(input, ABS_DISTANCE, 0);
- input_report_abs(input, ABS_TILT_X, 0);
- input_report_abs(input, ABS_TILT_Y, 0);
- if (wacom->tool[idx] >= BTN_TOOL_MOUSE) {
- input_report_key(input, BTN_LEFT, 0);
- input_report_key(input, BTN_MIDDLE, 0);
- input_report_key(input, BTN_RIGHT, 0);
- input_report_key(input, BTN_SIDE, 0);
- input_report_key(input, BTN_EXTRA, 0);
- input_report_abs(input, ABS_THROTTLE, 0);
- input_report_abs(input, ABS_RZ, 0);
- } else {
- input_report_abs(input, ABS_PRESSURE, 0);
- input_report_key(input, BTN_STYLUS, 0);
- input_report_key(input, BTN_STYLUS2, 0);
- input_report_key(input, BTN_TOUCH, 0);
- input_report_abs(input, ABS_WHEEL, 0);
- if (features->type >= INTUOS3S)
- input_report_abs(input, ABS_Z, 0);
- }
- input_report_key(input, wacom->tool[idx], 0);
- input_report_abs(input, ABS_MISC, 0); /* reset tool id */
- input_event(input, EV_MSC, MSC_SERIAL, wacom->serial[idx]);
- wacom->id[idx] = 0;
+ wacom_exit_report(wacom);
return 2;
}
@@ -1235,6 +1245,12 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom)
if (!valid)
continue;
+ if (!prox) {
+ wacom->shared->stylus_in_proximity = false;
+ wacom_exit_report(wacom);
+ input_sync(pen_input);
+ return;
+ }
if (range) {
input_report_abs(pen_input, ABS_X, get_unaligned_le16(&frame[1]));
input_report_abs(pen_input, ABS_Y, get_unaligned_le16(&frame[3]));
diff --git a/drivers/isdn/mISDN/dsp_hwec.c b/drivers/isdn/mISDN/dsp_hwec.c
index a6e87076acc2..5336bbdbfdc5 100644
--- a/drivers/isdn/mISDN/dsp_hwec.c
+++ b/drivers/isdn/mISDN/dsp_hwec.c
@@ -68,12 +68,12 @@ void dsp_hwec_enable(struct dsp *dsp, const char *arg)
goto _do;
{
- char _dup[len + 1];
char *dup, *tok, *name, *val;
int tmp;
- strcpy(_dup, arg);
- dup = _dup;
+ dup = kstrdup(arg, GFP_ATOMIC);
+ if (!dup)
+ return;
while ((tok = strsep(&dup, ","))) {
if (!strlen(tok))
@@ -89,6 +89,8 @@ void dsp_hwec_enable(struct dsp *dsp, const char *arg)
deftaps = tmp;
}
}
+
+ kfree(dup);
}
_do:
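
The fix above swaps an on-stack variable-length array for kstrdup() plus strsep() parsing. A userspace sketch of the same parse loop, using strdup()/strsep() in place of the kernel helpers (the option string is made up):

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Parse "name=value" options out of a comma-separated string. */
static void parse_options(const char *arg)
{
    char *dup, *cur, *tok;

    cur = dup = strdup(arg);    /* heap copy; no stack VLA */
    if (!dup)
        return;

    while ((tok = strsep(&cur, ","))) {
        char *name, *val;

        if (!strlen(tok))
            continue;
        name = strsep(&tok, "=");
        val = tok;    /* NULL if no '=' was present */
        printf("option '%s' = '%s'\n", name, val ? val : "");
    }

    free(dup);
}

int main(void)
{
    parse_options("deftaps=128,foo,bar=1");
    return 0;
}

The sketch keeps the strdup() result separate from the moving cursor: strsep() advances the cursor and leaves it NULL at the end of the string, so the allocation must be freed through the original pointer.
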
diff --git a/drivers/isdn/mISDN/l1oip_core.c b/drivers/isdn/mISDN/l1oip_core.c
index 21d50e4cc5e1..b05022f94f18 100644
--- a/drivers/isdn/mISDN/l1oip_core.c
+++ b/drivers/isdn/mISDN/l1oip_core.c
@@ -279,7 +279,7 @@ l1oip_socket_send(struct l1oip *hc, u8 localcodec, u8 channel, u32 chanmask,
u16 timebase, u8 *buf, int len)
{
u8 *p;
- u8 frame[len + 32];
+ u8 frame[MAX_DFRAME_LEN_L1 + 32];
struct socket *socket = NULL;
if (debug & DEBUG_L1OIP_MSG)
@@ -902,7 +902,11 @@ handle_dmsg(struct mISDNchannel *ch, struct sk_buff *skb)
p = skb->data;
l = skb->len;
while (l) {
- ll = (l < L1OIP_MAX_PERFRAME) ? l : L1OIP_MAX_PERFRAME;
+ /*
+ * The protocol allows up to L1OIP_MAX_PERFRAME bytes per chunk, but
+ * the fixed send buffer only holds MAX_DFRAME_LEN_L1 (the smaller of
+ * the two), so chunk by that limit.
+ */
+ ll = (l < MAX_DFRAME_LEN_L1) ? l : MAX_DFRAME_LEN_L1;
l1oip_socket_send(hc, 0, dch->slot, 0,
hc->chan[dch->slot].tx_counter++, p, ll);
p += ll;
@@ -1140,7 +1144,11 @@ handle_bmsg(struct mISDNchannel *ch, struct sk_buff *skb)
p = skb->data;
l = skb->len;
while (l) {
- ll = (l < L1OIP_MAX_PERFRAME) ? l : L1OIP_MAX_PERFRAME;
+ /*
+ * The protocol allows up to L1OIP_MAX_PERFRAME bytes per chunk, but
+ * the fixed send buffer only holds MAX_DFRAME_LEN_L1 (the smaller of
+ * the two), so chunk by that limit.
+ */
+ ll = (l < MAX_DFRAME_LEN_L1) ? l : MAX_DFRAME_LEN_L1;
l1oip_socket_send(hc, hc->codec, bch->slot, 0,
hc->chan[bch->slot].tx_counter, p, ll);
hc->chan[bch->slot].tx_counter += ll;
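
The l1oip change above replaces a variable-length stack buffer with a fixed MAX_DFRAME_LEN_L1-sized one and sends long payloads in chunks of that size. A userspace sketch of the chunking loop (the chunk size and send function are stand-ins):

#include <stdio.h>
#include <string.h>

#define MAX_CHUNK 260    /* stand-in for MAX_DFRAME_LEN_L1 */

static void send_chunk(const unsigned char *p, int len, int counter)
{
    printf("send %d bytes at %p, counter %d\n", len, (const void *)p,
           counter);
}

/* Send @len bytes in pieces no larger than the fixed buffer can hold. */
static void send_all(const unsigned char *p, int len)
{
    int counter = 0;

    while (len) {
        int ll = (len < MAX_CHUNK) ? len : MAX_CHUNK;

        send_chunk(p, ll, counter);
        counter += ll;
        p += ll;
        len -= ll;
    }
}

int main(void)
{
    unsigned char buf[700];

    memset(buf, 0, sizeof(buf));
    send_all(buf, (int)sizeof(buf));
    return 0;
}
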
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 3bea45e8ccff..c208c01f63a5 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -9256,8 +9256,10 @@ void md_reload_sb(struct mddev *mddev, int nr)
check_sb_changes(mddev, rdev);
/* Read all rdev's to update recovery_offset */
- rdev_for_each_rcu(rdev, mddev)
- read_rdev(mddev, rdev);
+ rdev_for_each_rcu(rdev, mddev) {
+ if (!test_bit(Faulty, &rdev->flags))
+ read_rdev(mddev, rdev);
+ }
}
EXPORT_SYMBOL(md_reload_sb);
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index e2943fb74056..e9e3308cb0a7 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -854,7 +854,7 @@ static void flush_pending_writes(struct r1conf *conf)
* there is no normal IO happening. It must arrange to call
* lower_barrier when the particular background IO completes.
*/
-static void raise_barrier(struct r1conf *conf, sector_t sector_nr)
+static sector_t raise_barrier(struct r1conf *conf, sector_t sector_nr)
{
int idx = sector_to_idx(sector_nr);
@@ -885,13 +885,23 @@ static void raise_barrier(struct r1conf *conf, sector_t sector_nr)
* max resync count which allowed on current I/O barrier bucket.
*/
wait_event_lock_irq(conf->wait_barrier,
- !conf->array_frozen &&
+ (!conf->array_frozen &&
!atomic_read(&conf->nr_pending[idx]) &&
- atomic_read(&conf->barrier[idx]) < RESYNC_DEPTH,
+ atomic_read(&conf->barrier[idx]) < RESYNC_DEPTH) ||
+ test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery),
conf->resync_lock);
+ if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
+ atomic_dec(&conf->barrier[idx]);
+ spin_unlock_irq(&conf->resync_lock);
+ wake_up(&conf->wait_barrier);
+ return -EINTR;
+ }
+
atomic_inc(&conf->nr_sync_pending);
spin_unlock_irq(&conf->resync_lock);
+
+ return 0;
}
static void lower_barrier(struct r1conf *conf, sector_t sector_nr)
@@ -1092,6 +1102,8 @@ static void alloc_behind_master_bio(struct r1bio *r1_bio,
goto skip_copy;
}
+ behind_bio->bi_write_hint = bio->bi_write_hint;
+
while (i < vcnt && size) {
struct page *page;
int len = min_t(int, PAGE_SIZE, size);
@@ -2662,9 +2674,12 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
bitmap_cond_end_sync(mddev->bitmap, sector_nr,
mddev_is_clustered(mddev) && (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high));
- r1_bio = raid1_alloc_init_r1buf(conf);
- raise_barrier(conf, sector_nr);
+
+ if (raise_barrier(conf, sector_nr))
+ return 0;
+
+ r1_bio = raid1_alloc_init_r1buf(conf);
rcu_read_lock();
/*
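
raise_barrier() above now also wakes when MD_RECOVERY_INTR is set, so a resync blocked on the barrier can bail out with -EINTR instead of hanging. A pthread sketch of a condition wait with such an abort flag (all names are made up):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int ready;          /* the condition the waiter needs */
static int interrupted;    /* abort flag, like MD_RECOVERY_INTR */

/* Wait for @ready, but give up cleanly if @interrupted is raised. */
static int wait_barrier(void)
{
    int ret = 0;

    pthread_mutex_lock(&lock);
    while (!ready && !interrupted)
        pthread_cond_wait(&cond, &lock);
    if (interrupted)
        ret = -EINTR;    /* a real caller would undo partial state here */
    pthread_mutex_unlock(&lock);
    return ret;
}

static void *aborter(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&lock);
    interrupted = 1;    /* wake the waiter without satisfying it */
    pthread_cond_broadcast(&cond);
    pthread_mutex_unlock(&lock);
    return NULL;
}

int main(void)
{
    pthread_t t;

    pthread_create(&t, NULL, aborter, NULL);
    printf("wait_barrier() = %d\n", wait_barrier());
    pthread_join(t, NULL);
    return 0;
}
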
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 231f3a1e27bf..86503f60468f 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -1994,6 +1994,7 @@ static struct scsi_host_template mptsas_driver_template = {
.cmd_per_lun = 7,
.use_clustering = ENABLE_CLUSTERING,
.shost_attrs = mptscsih_host_attrs,
+ .no_write_same = 1,
};
static int mptsas_get_linkerrors(struct sas_phy *phy)
diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
index 8e0acd197c43..6af946d16d24 100644
--- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c
+++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
@@ -9,6 +9,7 @@
* published by the Free Software Foundation.
*/
+#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/io-64-nonatomic-hi-lo.h>
@@ -62,6 +63,17 @@
* need a custom accessor.
*/
+static unsigned long global_flags;
+/*
+ * Workaround to avoid using the RX DMAC from multiple channels.
+ * On R-Car H3 ES1.* and M3-W ES1.0, when multiple SDHI channels use
+ * the RX DMAC simultaneously, hundreds of bytes of data are sometimes
+ * not stored into system memory even though the DMAC interrupt
+ * happened, so this driver uses only one RX DMAC channel.
+ */
+#define SDHI_INTERNAL_DMAC_ONE_RX_ONLY 0
+#define SDHI_INTERNAL_DMAC_RX_IN_USE 1
+
/* Definitions for sampling clocks */
static struct renesas_sdhi_scc rcar_gen3_scc_taps[] = {
{
@@ -126,6 +138,9 @@ renesas_sdhi_internal_dmac_abort_dma(struct tmio_mmc_host *host) {
renesas_sdhi_internal_dmac_dm_write(host, DM_CM_RST,
RST_RESERVED_BITS | val);
+ if (host->data && host->data->flags & MMC_DATA_READ)
+ clear_bit(SDHI_INTERNAL_DMAC_RX_IN_USE, &global_flags);
+
renesas_sdhi_internal_dmac_enable_dma(host, true);
}
@@ -155,6 +170,9 @@ renesas_sdhi_internal_dmac_start_dma(struct tmio_mmc_host *host,
if (data->flags & MMC_DATA_READ) {
dtran_mode |= DTRAN_MODE_CH_NUM_CH1;
dir = DMA_FROM_DEVICE;
+ if (test_bit(SDHI_INTERNAL_DMAC_ONE_RX_ONLY, &global_flags) &&
+ test_and_set_bit(SDHI_INTERNAL_DMAC_RX_IN_USE, &global_flags))
+ goto force_pio;
} else {
dtran_mode |= DTRAN_MODE_CH_NUM_CH0;
dir = DMA_TO_DEVICE;
@@ -208,6 +226,9 @@ static void renesas_sdhi_internal_dmac_complete_tasklet_fn(unsigned long arg)
renesas_sdhi_internal_dmac_enable_dma(host, false);
dma_unmap_sg(&host->pdev->dev, host->sg_ptr, host->sg_len, dir);
+ if (dir == DMA_FROM_DEVICE)
+ clear_bit(SDHI_INTERNAL_DMAC_RX_IN_USE, &global_flags);
+
tmio_mmc_do_data_irq(host);
out:
spin_unlock_irq(&host->lock);
@@ -251,18 +272,24 @@ static const struct tmio_mmc_dma_ops renesas_sdhi_internal_dmac_dma_ops = {
* implementation as others may use a different implementation.
*/
static const struct soc_device_attribute gen3_soc_whitelist[] = {
- { .soc_id = "r8a7795", .revision = "ES1.*" },
- { .soc_id = "r8a7795", .revision = "ES2.0" },
- { .soc_id = "r8a7796", .revision = "ES1.0" },
- { .soc_id = "r8a77995", .revision = "ES1.0" },
- { /* sentinel */ }
+ { .soc_id = "r8a7795", .revision = "ES1.*",
+ .data = (void *)BIT(SDHI_INTERNAL_DMAC_ONE_RX_ONLY) },
+ { .soc_id = "r8a7795", .revision = "ES2.0" },
+ { .soc_id = "r8a7796", .revision = "ES1.0",
+ .data = (void *)BIT(SDHI_INTERNAL_DMAC_ONE_RX_ONLY) },
+ { .soc_id = "r8a77995", .revision = "ES1.0" },
+ { /* sentinel */ }
};
static int renesas_sdhi_internal_dmac_probe(struct platform_device *pdev)
{
- if (!soc_device_match(gen3_soc_whitelist))
+ const struct soc_device_attribute *soc = soc_device_match(gen3_soc_whitelist);
+
+ if (!soc)
return -ENODEV;
+ global_flags |= (unsigned long)soc->data;
+
return renesas_sdhi_probe(pdev, &renesas_sdhi_internal_dmac_dma_ops);
}
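
The workaround above admits only one owner of the RX DMAC at a time: test_and_set_bit() on a global flag claims it, and a second claimant falls back to PIO. A C11 stdatomic sketch of that single-owner gate (names are illustrative):

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag rx_in_use = ATOMIC_FLAG_INIT;

/* Try to claim the shared RX channel; 0 on success, -1 means fall back. */
static int claim_rx(void)
{
    if (atomic_flag_test_and_set(&rx_in_use))
        return -1;    /* someone else owns it: fall back to PIO */
    return 0;
}

static void release_rx(void)
{
    atomic_flag_clear(&rx_in_use);
}

int main(void)
{
    printf("first claim:  %d\n", claim_rx());    /* 0 */
    printf("second claim: %d\n", claim_rx());    /* -1 */
    release_rx();
    printf("after clear:  %d\n", claim_rx());    /* 0 */
    return 0;
}
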
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index 787434e5589d..78c25ad35fd2 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -1312,7 +1312,7 @@ static void amd_enable_manual_tuning(struct pci_dev *pdev)
pci_write_config_dword(pdev, AMD_SD_MISC_CONTROL, val);
}
-static int amd_execute_tuning(struct sdhci_host *host, u32 opcode)
+static int amd_execute_tuning_hs200(struct sdhci_host *host, u32 opcode)
{
struct sdhci_pci_slot *slot = sdhci_priv(host);
struct pci_dev *pdev = slot->chip->pdev;
@@ -1351,6 +1351,27 @@ static int amd_execute_tuning(struct sdhci_host *host, u32 opcode)
return 0;
}
+static int amd_execute_tuning(struct mmc_host *mmc, u32 opcode)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+
+ /* AMD requires custom HS200 tuning */
+ if (host->timing == MMC_TIMING_MMC_HS200)
+ return amd_execute_tuning_hs200(host, opcode);
+
+ /* Otherwise perform standard SDHCI tuning */
+ return sdhci_execute_tuning(mmc, opcode);
+}
+
+static int amd_probe_slot(struct sdhci_pci_slot *slot)
+{
+ struct mmc_host_ops *ops = &slot->host->mmc_host_ops;
+
+ ops->execute_tuning = amd_execute_tuning;
+
+ return 0;
+}
+
static int amd_probe(struct sdhci_pci_chip *chip)
{
struct pci_dev *smbus_dev;
@@ -1385,12 +1406,12 @@ static const struct sdhci_ops amd_sdhci_pci_ops = {
.set_bus_width = sdhci_set_bus_width,
.reset = sdhci_reset,
.set_uhs_signaling = sdhci_set_uhs_signaling,
- .platform_execute_tuning = amd_execute_tuning,
};
static const struct sdhci_pci_fixes sdhci_amd = {
.probe = amd_probe,
.ops = &amd_sdhci_pci_ops,
+ .probe_slot = amd_probe_slot,
};
static const struct pci_device_id pci_ids[] = {
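
amd_execute_tuning() above is deliberately a thin dispatcher: vendor-specific tuning only for HS200, the standard SDHCI path for everything else. A tiny sketch of that mode-keyed dispatch (modes and handlers are stand-ins):

#include <stdio.h>

enum timing { TIMING_LEGACY, TIMING_HS200 };

static int tune_hs200(void)
{
    printf("vendor-specific HS200 tuning\n");
    return 0;
}

static int tune_standard(void)
{
    printf("standard tuning\n");
    return 0;
}

/* One entry point: handle the special case first, fall through otherwise. */
static int execute_tuning(enum timing t)
{
    if (t == TIMING_HS200)
        return tune_hs200();
    return tune_standard();
}

int main(void)
{
    execute_tuning(TIMING_HS200);
    execute_tuning(TIMING_LEGACY);
    return 0;
}
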
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index b7b113018853..718e4914e3a0 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1660,8 +1660,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
} /* switch(bond_mode) */
#ifdef CONFIG_NET_POLL_CONTROLLER
- slave_dev->npinfo = bond->dev->npinfo;
- if (slave_dev->npinfo) {
+ if (bond->dev->npinfo) {
if (slave_enable_netpoll(new_slave)) {
netdev_info(bond_dev, "master_dev is using netpoll, but new slave device does not support netpoll\n");
res = -EBUSY;
diff --git a/drivers/net/dsa/mv88e6xxx/hwtstamp.c b/drivers/net/dsa/mv88e6xxx/hwtstamp.c
index ac7694c71266..a036c490b7ce 100644
--- a/drivers/net/dsa/mv88e6xxx/hwtstamp.c
+++ b/drivers/net/dsa/mv88e6xxx/hwtstamp.c
@@ -285,10 +285,18 @@ static void mv88e6xxx_get_rxts(struct mv88e6xxx_chip *chip,
struct sk_buff_head *rxq)
{
u16 buf[4] = { 0 }, status, seq_id;
- u64 ns, timelo, timehi;
struct skb_shared_hwtstamps *shwt;
+ struct sk_buff_head received;
+ u64 ns, timelo, timehi;
+ unsigned long flags;
int err;
+ /* The latched timestamp belongs to one of the received frames. */
+ __skb_queue_head_init(&received);
+ spin_lock_irqsave(&rxq->lock, flags);
+ skb_queue_splice_tail_init(rxq, &received);
+ spin_unlock_irqrestore(&rxq->lock, flags);
+
mutex_lock(&chip->reg_lock);
err = mv88e6xxx_port_ptp_read(chip, ps->port_id,
reg, buf, ARRAY_SIZE(buf));
@@ -311,7 +319,7 @@ static void mv88e6xxx_get_rxts(struct mv88e6xxx_chip *chip,
/* Since the device can only handle one time stamp at a time,
* we purge any extra frames from the queue.
*/
- for ( ; skb; skb = skb_dequeue(rxq)) {
+ for ( ; skb; skb = __skb_dequeue(&received)) {
if (mv88e6xxx_ts_valid(status) && seq_match(skb, seq_id)) {
ns = timehi << 16 | timelo;
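
The fix above splices the shared receive queue into a private list inside one short critical section, then matches the latched timestamp against the private list with no lock held. A pthread sketch of the splice-then-process pattern on a simple list (the node type is illustrative):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
    int seq;
    struct node *next;
};

static pthread_mutex_t qlock = PTHREAD_MUTEX_INITIALIZER;
static struct node *queue;    /* shared, lock-protected */

static void enqueue(int seq)
{
    struct node *n = malloc(sizeof(*n));

    if (!n)
        return;
    n->seq = seq;
    pthread_mutex_lock(&qlock);
    n->next = queue;
    queue = n;
    pthread_mutex_unlock(&qlock);
}

static void process_all(void)
{
    struct node *received, *n;

    /* Take the whole queue in one short critical section. */
    pthread_mutex_lock(&qlock);
    received = queue;
    queue = NULL;
    pthread_mutex_unlock(&qlock);

    /* Walk the now-private list with no lock held. */
    while ((n = received)) {
        received = n->next;
        printf("processing seq %d\n", n->seq);
        free(n);
    }
}

int main(void)
{
    enqueue(1);
    enqueue(2);
    process_all();
    return 0;
}
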
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
index 7ea72ef11a55..d272dc6984ac 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -1321,6 +1321,10 @@
#define MDIO_VEND2_AN_STAT 0x8002
#endif
+#ifndef MDIO_VEND2_PMA_CDR_CONTROL
+#define MDIO_VEND2_PMA_CDR_CONTROL 0x8056
+#endif
+
#ifndef MDIO_CTRL1_SPEED1G
#define MDIO_CTRL1_SPEED1G (MDIO_CTRL1_SPEED10G & ~BMCR_SPEED100)
#endif
@@ -1369,6 +1373,10 @@
#define XGBE_AN_CL37_TX_CONFIG_MASK 0x08
#define XGBE_AN_CL37_MII_CTRL_8BIT 0x0100
+#define XGBE_PMA_CDR_TRACK_EN_MASK 0x01
+#define XGBE_PMA_CDR_TRACK_EN_OFF 0x00
+#define XGBE_PMA_CDR_TRACK_EN_ON 0x01
+
/* Bit setting and getting macros
* The get macro will extract the current bit field value from within
* the variable
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
index 7d128be61310..b91143947ed2 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
@@ -519,6 +519,22 @@ void xgbe_debugfs_init(struct xgbe_prv_data *pdata)
"debugfs_create_file failed\n");
}
+ if (pdata->vdata->an_cdr_workaround) {
+ pfile = debugfs_create_bool("an_cdr_workaround", 0600,
+ pdata->xgbe_debugfs,
+ &pdata->debugfs_an_cdr_workaround);
+ if (!pfile)
+ netdev_err(pdata->netdev,
+ "debugfs_create_bool failed\n");
+
+ pfile = debugfs_create_bool("an_cdr_track_early", 0600,
+ pdata->xgbe_debugfs,
+ &pdata->debugfs_an_cdr_track_early);
+ if (!pfile)
+ netdev_err(pdata->netdev,
+ "debugfs_create_bool failed\n");
+ }
+
kfree(buf);
}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
index 795e556d4a3f..441d0973957b 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
@@ -349,6 +349,7 @@ int xgbe_config_netdev(struct xgbe_prv_data *pdata)
XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);
/* Call MDIO/PHY initialization routine */
+ pdata->debugfs_an_cdr_workaround = pdata->vdata->an_cdr_workaround;
ret = pdata->phy_if.phy_init(pdata);
if (ret)
return ret;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
index 072b9f664597..1b45cd73a258 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
@@ -432,11 +432,16 @@ static void xgbe_an73_disable(struct xgbe_prv_data *pdata)
xgbe_an73_set(pdata, false, false);
xgbe_an73_disable_interrupts(pdata);
+ pdata->an_start = 0;
+
netif_dbg(pdata, link, pdata->netdev, "CL73 AN disabled\n");
}
static void xgbe_an_restart(struct xgbe_prv_data *pdata)
{
+ if (pdata->phy_if.phy_impl.an_pre)
+ pdata->phy_if.phy_impl.an_pre(pdata);
+
switch (pdata->an_mode) {
case XGBE_AN_MODE_CL73:
case XGBE_AN_MODE_CL73_REDRV:
@@ -453,6 +458,9 @@ static void xgbe_an_restart(struct xgbe_prv_data *pdata)
static void xgbe_an_disable(struct xgbe_prv_data *pdata)
{
+ if (pdata->phy_if.phy_impl.an_post)
+ pdata->phy_if.phy_impl.an_post(pdata);
+
switch (pdata->an_mode) {
case XGBE_AN_MODE_CL73:
case XGBE_AN_MODE_CL73_REDRV:
@@ -505,11 +513,11 @@ static enum xgbe_an xgbe_an73_tx_training(struct xgbe_prv_data *pdata,
XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL,
reg);
- if (pdata->phy_if.phy_impl.kr_training_post)
- pdata->phy_if.phy_impl.kr_training_post(pdata);
-
netif_dbg(pdata, link, pdata->netdev,
"KR training initiated\n");
+
+ if (pdata->phy_if.phy_impl.kr_training_post)
+ pdata->phy_if.phy_impl.kr_training_post(pdata);
}
return XGBE_AN_PAGE_RECEIVED;
@@ -637,11 +645,11 @@ static enum xgbe_an xgbe_an73_incompat_link(struct xgbe_prv_data *pdata)
return XGBE_AN_NO_LINK;
}
- xgbe_an73_disable(pdata);
+ xgbe_an_disable(pdata);
xgbe_switch_mode(pdata);
- xgbe_an73_restart(pdata);
+ xgbe_an_restart(pdata);
return XGBE_AN_INCOMPAT_LINK;
}
@@ -820,6 +828,9 @@ static void xgbe_an37_state_machine(struct xgbe_prv_data *pdata)
pdata->an_result = pdata->an_state;
pdata->an_state = XGBE_AN_READY;
+ if (pdata->phy_if.phy_impl.an_post)
+ pdata->phy_if.phy_impl.an_post(pdata);
+
netif_dbg(pdata, link, pdata->netdev, "CL37 AN result: %s\n",
xgbe_state_as_string(pdata->an_result));
}
@@ -903,6 +914,9 @@ again:
pdata->kx_state = XGBE_RX_BPA;
pdata->an_start = 0;
+ if (pdata->phy_if.phy_impl.an_post)
+ pdata->phy_if.phy_impl.an_post(pdata);
+
netif_dbg(pdata, link, pdata->netdev, "CL73 AN result: %s\n",
xgbe_state_as_string(pdata->an_result));
}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
index eb23f9ba1a9a..82d1f416ee2a 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
@@ -456,6 +456,7 @@ static const struct xgbe_version_data xgbe_v2a = {
.irq_reissue_support = 1,
.tx_desc_prefetch = 5,
.rx_desc_prefetch = 5,
+ .an_cdr_workaround = 1,
};
static const struct xgbe_version_data xgbe_v2b = {
@@ -470,6 +471,7 @@ static const struct xgbe_version_data xgbe_v2b = {
.irq_reissue_support = 1,
.tx_desc_prefetch = 5,
.rx_desc_prefetch = 5,
+ .an_cdr_workaround = 1,
};
static const struct pci_device_id xgbe_pci_table[] = {
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
index 3304a291aa96..aac884314000 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
@@ -147,6 +147,14 @@
/* Rate-change complete wait/retry count */
#define XGBE_RATECHANGE_COUNT 500
+/* CDR delay values for KR support (in usec) */
+#define XGBE_CDR_DELAY_INIT 10000
+#define XGBE_CDR_DELAY_INC 10000
+#define XGBE_CDR_DELAY_MAX 100000
+
+/* RRC frequency during link status check */
+#define XGBE_RRC_FREQUENCY 10
+
enum xgbe_port_mode {
XGBE_PORT_MODE_RSVD = 0,
XGBE_PORT_MODE_BACKPLANE,
@@ -245,6 +253,10 @@ enum xgbe_sfp_speed {
#define XGBE_SFP_BASE_VENDOR_SN 4
#define XGBE_SFP_BASE_VENDOR_SN_LEN 16
+#define XGBE_SFP_EXTD_OPT1 1
+#define XGBE_SFP_EXTD_OPT1_RX_LOS BIT(1)
+#define XGBE_SFP_EXTD_OPT1_TX_FAULT BIT(3)
+
#define XGBE_SFP_EXTD_DIAG 28
#define XGBE_SFP_EXTD_DIAG_ADDR_CHANGE BIT(2)
@@ -324,6 +336,7 @@ struct xgbe_phy_data {
unsigned int sfp_gpio_address;
unsigned int sfp_gpio_mask;
+ unsigned int sfp_gpio_inputs;
unsigned int sfp_gpio_rx_los;
unsigned int sfp_gpio_tx_fault;
unsigned int sfp_gpio_mod_absent;
@@ -355,6 +368,10 @@ struct xgbe_phy_data {
unsigned int redrv_addr;
unsigned int redrv_lane;
unsigned int redrv_model;
+
+ /* KR AN support */
+ unsigned int phy_cdr_notrack;
+ unsigned int phy_cdr_delay;
};
/* I2C, MDIO and GPIO lines are muxed, so only one device at a time */
@@ -974,6 +991,49 @@ static void xgbe_phy_sfp_external_phy(struct xgbe_prv_data *pdata)
phy_data->sfp_phy_avail = 1;
}
+static bool xgbe_phy_check_sfp_rx_los(struct xgbe_phy_data *phy_data)
+{
+ u8 *sfp_extd = phy_data->sfp_eeprom.extd;
+
+ if (!(sfp_extd[XGBE_SFP_EXTD_OPT1] & XGBE_SFP_EXTD_OPT1_RX_LOS))
+ return false;
+
+ if (phy_data->sfp_gpio_mask & XGBE_GPIO_NO_RX_LOS)
+ return false;
+
+ if (phy_data->sfp_gpio_inputs & (1 << phy_data->sfp_gpio_rx_los))
+ return true;
+
+ return false;
+}
+
+static bool xgbe_phy_check_sfp_tx_fault(struct xgbe_phy_data *phy_data)
+{
+ u8 *sfp_extd = phy_data->sfp_eeprom.extd;
+
+ if (!(sfp_extd[XGBE_SFP_EXTD_OPT1] & XGBE_SFP_EXTD_OPT1_TX_FAULT))
+ return false;
+
+ if (phy_data->sfp_gpio_mask & XGBE_GPIO_NO_TX_FAULT)
+ return false;
+
+ if (phy_data->sfp_gpio_inputs & (1 << phy_data->sfp_gpio_tx_fault))
+ return true;
+
+ return false;
+}
+
+static bool xgbe_phy_check_sfp_mod_absent(struct xgbe_phy_data *phy_data)
+{
+ if (phy_data->sfp_gpio_mask & XGBE_GPIO_NO_MOD_ABSENT)
+ return false;
+
+ if (phy_data->sfp_gpio_inputs & (1 << phy_data->sfp_gpio_mod_absent))
+ return true;
+
+ return false;
+}
+
static bool xgbe_phy_belfuse_parse_quirks(struct xgbe_prv_data *pdata)
{
struct xgbe_phy_data *phy_data = pdata->phy_data;
@@ -1019,6 +1079,10 @@ static void xgbe_phy_sfp_parse_eeprom(struct xgbe_prv_data *pdata)
if (sfp_base[XGBE_SFP_BASE_EXT_ID] != XGBE_SFP_EXT_ID_SFP)
return;
+ /* Update transceiver signals (eeprom extd/options) */
+ phy_data->sfp_tx_fault = xgbe_phy_check_sfp_tx_fault(phy_data);
+ phy_data->sfp_rx_los = xgbe_phy_check_sfp_rx_los(phy_data);
+
if (xgbe_phy_sfp_parse_quirks(pdata))
return;
@@ -1184,7 +1248,6 @@ put:
static void xgbe_phy_sfp_signals(struct xgbe_prv_data *pdata)
{
struct xgbe_phy_data *phy_data = pdata->phy_data;
- unsigned int gpio_input;
u8 gpio_reg, gpio_ports[2];
int ret;
@@ -1199,23 +1262,9 @@ static void xgbe_phy_sfp_signals(struct xgbe_prv_data *pdata)
return;
}
- gpio_input = (gpio_ports[1] << 8) | gpio_ports[0];
-
- if (phy_data->sfp_gpio_mask & XGBE_GPIO_NO_MOD_ABSENT) {
- /* No GPIO, just assume the module is present for now */
- phy_data->sfp_mod_absent = 0;
- } else {
- if (!(gpio_input & (1 << phy_data->sfp_gpio_mod_absent)))
- phy_data->sfp_mod_absent = 0;
- }
-
- if (!(phy_data->sfp_gpio_mask & XGBE_GPIO_NO_RX_LOS) &&
- (gpio_input & (1 << phy_data->sfp_gpio_rx_los)))
- phy_data->sfp_rx_los = 1;
+ phy_data->sfp_gpio_inputs = (gpio_ports[1] << 8) | gpio_ports[0];
- if (!(phy_data->sfp_gpio_mask & XGBE_GPIO_NO_TX_FAULT) &&
- (gpio_input & (1 << phy_data->sfp_gpio_tx_fault)))
- phy_data->sfp_tx_fault = 1;
+ phy_data->sfp_mod_absent = xgbe_phy_check_sfp_mod_absent(phy_data);
}
static void xgbe_phy_sfp_mod_absent(struct xgbe_prv_data *pdata)
@@ -2361,7 +2410,7 @@ static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart)
return 1;
/* No link, attempt a receiver reset cycle */
- if (phy_data->rrc_count++) {
+ if (phy_data->rrc_count++ > XGBE_RRC_FREQUENCY) {
phy_data->rrc_count = 0;
xgbe_phy_rrc(pdata);
}
@@ -2669,6 +2718,103 @@ static bool xgbe_phy_port_enabled(struct xgbe_prv_data *pdata)
return true;
}
+static void xgbe_phy_cdr_track(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ if (!pdata->debugfs_an_cdr_workaround)
+ return;
+
+ if (!phy_data->phy_cdr_notrack)
+ return;
+
+ usleep_range(phy_data->phy_cdr_delay,
+ phy_data->phy_cdr_delay + 500);
+
+ XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_VEND2_PMA_CDR_CONTROL,
+ XGBE_PMA_CDR_TRACK_EN_MASK,
+ XGBE_PMA_CDR_TRACK_EN_ON);
+
+ phy_data->phy_cdr_notrack = 0;
+}
+
+static void xgbe_phy_cdr_notrack(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ if (!pdata->debugfs_an_cdr_workaround)
+ return;
+
+ if (phy_data->phy_cdr_notrack)
+ return;
+
+ XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_VEND2_PMA_CDR_CONTROL,
+ XGBE_PMA_CDR_TRACK_EN_MASK,
+ XGBE_PMA_CDR_TRACK_EN_OFF);
+
+ xgbe_phy_rrc(pdata);
+
+ phy_data->phy_cdr_notrack = 1;
+}
+
+static void xgbe_phy_kr_training_post(struct xgbe_prv_data *pdata)
+{
+ if (!pdata->debugfs_an_cdr_track_early)
+ xgbe_phy_cdr_track(pdata);
+}
+
+static void xgbe_phy_kr_training_pre(struct xgbe_prv_data *pdata)
+{
+ if (pdata->debugfs_an_cdr_track_early)
+ xgbe_phy_cdr_track(pdata);
+}
+
+static void xgbe_phy_an_post(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ switch (pdata->an_mode) {
+ case XGBE_AN_MODE_CL73:
+ case XGBE_AN_MODE_CL73_REDRV:
+ if (phy_data->cur_mode != XGBE_MODE_KR)
+ break;
+
+ xgbe_phy_cdr_track(pdata);
+
+ switch (pdata->an_result) {
+ case XGBE_AN_READY:
+ case XGBE_AN_COMPLETE:
+ break;
+ default:
+ if (phy_data->phy_cdr_delay < XGBE_CDR_DELAY_MAX)
+ phy_data->phy_cdr_delay += XGBE_CDR_DELAY_INC;
+ else
+ phy_data->phy_cdr_delay = XGBE_CDR_DELAY_INIT;
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static void xgbe_phy_an_pre(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ switch (pdata->an_mode) {
+ case XGBE_AN_MODE_CL73:
+ case XGBE_AN_MODE_CL73_REDRV:
+ if (phy_data->cur_mode != XGBE_MODE_KR)
+ break;
+
+ xgbe_phy_cdr_notrack(pdata);
+ break;
+ default:
+ break;
+ }
+}
+
static void xgbe_phy_stop(struct xgbe_prv_data *pdata)
{
struct xgbe_phy_data *phy_data = pdata->phy_data;
@@ -2680,6 +2826,9 @@ static void xgbe_phy_stop(struct xgbe_prv_data *pdata)
xgbe_phy_sfp_reset(phy_data);
xgbe_phy_sfp_mod_absent(pdata);
+ /* Reset CDR support */
+ xgbe_phy_cdr_track(pdata);
+
/* Power off the PHY */
xgbe_phy_power_off(pdata);
@@ -2712,6 +2861,9 @@ static int xgbe_phy_start(struct xgbe_prv_data *pdata)
/* Start in highest supported mode */
xgbe_phy_set_mode(pdata, phy_data->start_mode);
+ /* Reset CDR support */
+ xgbe_phy_cdr_track(pdata);
+
/* After starting the I2C controller, we can check for an SFP */
switch (phy_data->port_mode) {
case XGBE_PORT_MODE_SFP:
@@ -3019,6 +3171,8 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata)
}
}
+ phy_data->phy_cdr_delay = XGBE_CDR_DELAY_INIT;
+
/* Register for driving external PHYs */
mii = devm_mdiobus_alloc(pdata->dev);
if (!mii) {
@@ -3071,4 +3225,10 @@ void xgbe_init_function_ptrs_phy_v2(struct xgbe_phy_if *phy_if)
phy_impl->an_advertising = xgbe_phy_an_advertising;
phy_impl->an_outcome = xgbe_phy_an_outcome;
+
+ phy_impl->an_pre = xgbe_phy_an_pre;
+ phy_impl->an_post = xgbe_phy_an_post;
+
+ phy_impl->kr_training_pre = xgbe_phy_kr_training_pre;
+ phy_impl->kr_training_post = xgbe_phy_kr_training_post;
}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index ad102c8bac7b..95d4b56448c6 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -833,6 +833,7 @@ struct xgbe_hw_if {
/* This structure represents implementation specific routines for an
* implementation of a PHY. All routines are required unless noted below.
* Optional routines:
+ * an_pre, an_post
* kr_training_pre, kr_training_post
*/
struct xgbe_phy_impl_if {
@@ -875,6 +876,10 @@ struct xgbe_phy_impl_if {
/* Process results of auto-negotiation */
enum xgbe_mode (*an_outcome)(struct xgbe_prv_data *);
+ /* Pre/Post auto-negotiation support */
+ void (*an_pre)(struct xgbe_prv_data *);
+ void (*an_post)(struct xgbe_prv_data *);
+
/* Pre/Post KR training enablement support */
void (*kr_training_pre)(struct xgbe_prv_data *);
void (*kr_training_post)(struct xgbe_prv_data *);
@@ -989,6 +994,7 @@ struct xgbe_version_data {
unsigned int irq_reissue_support;
unsigned int tx_desc_prefetch;
unsigned int rx_desc_prefetch;
+ unsigned int an_cdr_workaround;
};
struct xgbe_vxlan_data {
@@ -1257,6 +1263,9 @@ struct xgbe_prv_data {
unsigned int debugfs_xprop_reg;
unsigned int debugfs_xi2c_reg;
+
+ bool debugfs_an_cdr_workaround;
+ bool debugfs_an_cdr_track_early;
};
/* Function prototypes*/
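
The delay handling in xgbe_phy_an_post() above grows the CDR re-enable delay by 10 ms per failed CL73 attempt and wraps back to the initial 10 ms once the 100 ms ceiling is reached. A standalone sketch of that backoff arithmetic, with the constants copied from the patch and a purely illustrative main():

#include <stdio.h>

#define CDR_DELAY_INIT	10000	/* usec, first attempt */
#define CDR_DELAY_INC	10000	/* usec added per failed AN */
#define CDR_DELAY_MAX	100000	/* usec ceiling before wrap */

static unsigned int next_cdr_delay(unsigned int cur)
{
	/* Grow the delay until the cap, then start over */
	if (cur < CDR_DELAY_MAX)
		return cur + CDR_DELAY_INC;
	return CDR_DELAY_INIT;
}

int main(void)
{
	unsigned int delay = CDR_DELAY_INIT;
	int attempt;

	for (attempt = 1; attempt <= 12; attempt++) {
		printf("attempt %2d: delay %6u usec\n", attempt, delay);
		delay = next_cdr_delay(delay);
	}
	return 0;
}
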
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 1f622ca2a64f..8ba14ae00e8f 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -1927,22 +1927,39 @@ static char *bnxt_parse_pkglog(int desired_field, u8 *data, size_t datalen)
return retval;
}
-static char *bnxt_get_pkgver(struct net_device *dev, char *buf, size_t buflen)
+static void bnxt_get_pkgver(struct net_device *dev)
{
+ struct bnxt *bp = netdev_priv(dev);
u16 index = 0;
- u32 datalen;
+ char *pkgver;
+ u32 pkglen;
+ u8 *pkgbuf;
+ int len;
if (bnxt_find_nvram_item(dev, BNX_DIR_TYPE_PKG_LOG,
BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
- &index, NULL, &datalen) != 0)
- return NULL;
+ &index, NULL, &pkglen) != 0)
+ return;
- memset(buf, 0, buflen);
- if (bnxt_get_nvram_item(dev, index, 0, datalen, buf) != 0)
- return NULL;
+ pkgbuf = kzalloc(pkglen, GFP_KERNEL);
+ if (!pkgbuf) {
+ dev_err(&bp->pdev->dev, "Unable to allocate memory for pkg version, length = %u\n",
+ pkglen);
+ return;
+ }
+
+ if (bnxt_get_nvram_item(dev, index, 0, pkglen, pkgbuf))
+ goto err;
- return bnxt_parse_pkglog(BNX_PKG_LOG_FIELD_IDX_PKG_VERSION, buf,
- datalen);
+ pkgver = bnxt_parse_pkglog(BNX_PKG_LOG_FIELD_IDX_PKG_VERSION, pkgbuf,
+ pkglen);
+ if (pkgver && *pkgver != 0 && isdigit(*pkgver)) {
+ len = strlen(bp->fw_ver_str);
+ snprintf(bp->fw_ver_str + len, FW_VER_STR_LEN - len - 1,
+ "/pkg %s", pkgver);
+ }
+err:
+ kfree(pkgbuf);
}
static int bnxt_get_eeprom(struct net_device *dev,
@@ -2615,22 +2632,10 @@ void bnxt_ethtool_init(struct bnxt *bp)
struct hwrm_selftest_qlist_input req = {0};
struct bnxt_test_info *test_info;
struct net_device *dev = bp->dev;
- char *pkglog;
int i, rc;
- pkglog = kzalloc(BNX_PKG_LOG_MAX_LENGTH, GFP_KERNEL);
- if (pkglog) {
- char *pkgver;
- int len;
+ bnxt_get_pkgver(dev);
- pkgver = bnxt_get_pkgver(dev, pkglog, BNX_PKG_LOG_MAX_LENGTH);
- if (pkgver && *pkgver != 0 && isdigit(*pkgver)) {
- len = strlen(bp->fw_ver_str);
- snprintf(bp->fw_ver_str + len, FW_VER_STR_LEN - len - 1,
- "/pkg %s", pkgver);
- }
- kfree(pkglog);
- }
if (bp->hwrm_spec_code < 0x10704 || !BNXT_SINGLE_PF(bp))
return;
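
bnxt_get_pkgver() now sizes the package-log buffer from the actual NVRAM item length instead of a fixed 4 kB, and appends "/pkg <version>" to the firmware string with a bounded snprintf(). A userspace sketch of the append step, with a stand-in buffer size and made-up version strings:

#include <stdio.h>
#include <string.h>

#define FW_VER_STR_LEN 32	/* stand-in for the driver constant */

int main(void)
{
	char fw_ver_str[FW_VER_STR_LEN] = "222.1.68.0";
	const char *pkgver = "214.0.253.8";	/* parsed from pkg log */
	int len = strlen(fw_ver_str);

	/* Reserve the trailing NUL explicitly, as the driver does */
	snprintf(fw_ver_str + len, FW_VER_STR_LEN - len - 1,
		 "/pkg %s", pkgver);
	printf("%s\n", fw_ver_str);
	return 0;
}
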
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h
index 73f2249555b5..83444811d3c6 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h
@@ -59,8 +59,6 @@ enum bnxt_nvm_directory_type {
#define BNX_DIR_ATTR_NO_CHKSUM (1 << 0)
#define BNX_DIR_ATTR_PROP_STREAM (1 << 1)
-#define BNX_PKG_LOG_MAX_LENGTH 4096
-
enum bnxnvm_pkglog_field_index {
BNX_PKG_LOG_FIELD_IDX_INSTALLED_TIMESTAMP = 0,
BNX_PKG_LOG_FIELD_IDX_PKG_DESCRIPTION = 1,
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h
index 3e62692af011..fa5b30f547f6 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.h
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.h
@@ -87,7 +87,7 @@ do { \
#define HNAE_AE_REGISTER 0x1
-#define RCB_RING_NAME_LEN 16
+#define RCB_RING_NAME_LEN (IFNAMSIZ + 4)
#define HNAE_LOWEST_LATENCY_COAL_PARAM 30
#define HNAE_LOW_LATENCY_COAL_PARAM 80
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index aad5658d79d5..6e8d6a6f6aaf 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -794,46 +794,61 @@ static int ibmvnic_login(struct net_device *netdev)
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
unsigned long timeout = msecs_to_jiffies(30000);
- struct device *dev = &adapter->vdev->dev;
+ int retry_count = 0;
int rc;
do {
- if (adapter->renegotiate) {
- adapter->renegotiate = false;
+ if (retry_count > IBMVNIC_MAX_QUEUES) {
+ netdev_warn(netdev, "Login attempts exceeded\n");
+ return -1;
+ }
+
+ adapter->init_done_rc = 0;
+ reinit_completion(&adapter->init_done);
+ rc = send_login(adapter);
+ if (rc) {
+ netdev_warn(netdev, "Unable to login\n");
+ return rc;
+ }
+
+ if (!wait_for_completion_timeout(&adapter->init_done,
+ timeout)) {
+ netdev_warn(netdev, "Login timed out\n");
+ return -1;
+ }
+
+ if (adapter->init_done_rc == PARTIALSUCCESS) {
+ retry_count++;
release_sub_crqs(adapter, 1);
+ adapter->init_done_rc = 0;
reinit_completion(&adapter->init_done);
send_cap_queries(adapter);
if (!wait_for_completion_timeout(&adapter->init_done,
timeout)) {
- dev_err(dev, "Capabilities query timeout\n");
+ netdev_warn(netdev,
+ "Capabilities query timed out\n");
return -1;
}
+
rc = init_sub_crqs(adapter);
if (rc) {
- dev_err(dev,
- "Initialization of SCRQ's failed\n");
+ netdev_warn(netdev,
+ "SCRQ initialization failed\n");
return -1;
}
+
rc = init_sub_crq_irqs(adapter);
if (rc) {
- dev_err(dev,
- "Initialization of SCRQ's irqs failed\n");
+ netdev_warn(netdev,
+ "SCRQ irq initialization failed\n");
return -1;
}
- }
-
- reinit_completion(&adapter->init_done);
- rc = send_login(adapter);
- if (rc) {
- dev_err(dev, "Unable to attempt device login\n");
- return rc;
- } else if (!wait_for_completion_timeout(&adapter->init_done,
- timeout)) {
- dev_err(dev, "Login timeout\n");
+ } else if (adapter->init_done_rc) {
+ netdev_warn(netdev, "Adapter login failed\n");
return -1;
}
- } while (adapter->renegotiate);
+ } while (adapter->init_done_rc == PARTIALSUCCESS);
/* handle pending MAC address changes after successful login */
if (adapter->mac_change_pending) {
@@ -1034,16 +1049,14 @@ static int __ibmvnic_open(struct net_device *netdev)
netdev_dbg(netdev, "Enabling rx_scrq[%d] irq\n", i);
if (prev_state == VNIC_CLOSED)
enable_irq(adapter->rx_scrq[i]->irq);
- else
- enable_scrq_irq(adapter, adapter->rx_scrq[i]);
+ enable_scrq_irq(adapter, adapter->rx_scrq[i]);
}
for (i = 0; i < adapter->req_tx_queues; i++) {
netdev_dbg(netdev, "Enabling tx_scrq[%d] irq\n", i);
if (prev_state == VNIC_CLOSED)
enable_irq(adapter->tx_scrq[i]->irq);
- else
- enable_scrq_irq(adapter, adapter->tx_scrq[i]);
+ enable_scrq_irq(adapter, adapter->tx_scrq[i]);
}
rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
@@ -1115,7 +1128,7 @@ static void clean_rx_pools(struct ibmvnic_adapter *adapter)
if (!adapter->rx_pool)
return;
- rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
+ rx_scrqs = adapter->num_active_rx_pools;
rx_entries = adapter->req_rx_add_entries_per_subcrq;
/* Free any remaining skbs in the rx buffer pools */
@@ -1164,7 +1177,7 @@ static void clean_tx_pools(struct ibmvnic_adapter *adapter)
if (!adapter->tx_pool || !adapter->tso_pool)
return;
- tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
+ tx_scrqs = adapter->num_active_tx_pools;
/* Free any remaining skbs in the tx buffer pools */
for (i = 0; i < tx_scrqs; i++) {
@@ -1184,6 +1197,7 @@ static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter)
if (adapter->tx_scrq[i]->irq) {
netdev_dbg(netdev,
"Disabling tx_scrq[%d] irq\n", i);
+ disable_scrq_irq(adapter, adapter->tx_scrq[i]);
disable_irq(adapter->tx_scrq[i]->irq);
}
}
@@ -1193,6 +1207,7 @@ static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter)
if (adapter->rx_scrq[i]->irq) {
netdev_dbg(netdev,
"Disabling rx_scrq[%d] irq\n", i);
+ disable_scrq_irq(adapter, adapter->rx_scrq[i]);
disable_irq(adapter->rx_scrq[i]->irq);
}
}
@@ -1828,7 +1843,8 @@ static int do_reset(struct ibmvnic_adapter *adapter,
for (i = 0; i < adapter->req_rx_queues; i++)
napi_schedule(&adapter->napi[i]);
- if (adapter->reset_reason != VNIC_RESET_FAILOVER)
+ if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
+ adapter->reset_reason != VNIC_RESET_CHANGE_PARAM)
netdev_notify_peers(netdev);
netif_carrier_on(netdev);
@@ -2601,12 +2617,19 @@ static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
{
struct device *dev = &adapter->vdev->dev;
unsigned long rc;
+ u64 val;
if (scrq->hw_irq > 0x100000000ULL) {
dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
return 1;
}
+ val = (0xff000000) | scrq->hw_irq;
+ rc = plpar_hcall_norets(H_EOI, val);
+ if (rc)
+ dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
+ val, rc);
+
rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
if (rc)
@@ -3170,7 +3193,7 @@ static int send_version_xchg(struct ibmvnic_adapter *adapter)
struct vnic_login_client_data {
u8 type;
__be16 len;
- char name;
+ char name[];
} __packed;
static int vnic_client_data_len(struct ibmvnic_adapter *adapter)
@@ -3199,21 +3222,21 @@ static void vnic_add_client_data(struct ibmvnic_adapter *adapter,
vlcd->type = 1;
len = strlen(os_name) + 1;
vlcd->len = cpu_to_be16(len);
- strncpy(&vlcd->name, os_name, len);
- vlcd = (struct vnic_login_client_data *)((char *)&vlcd->name + len);
+ strncpy(vlcd->name, os_name, len);
+ vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
/* Type 2 - LPAR name */
vlcd->type = 2;
len = strlen(utsname()->nodename) + 1;
vlcd->len = cpu_to_be16(len);
- strncpy(&vlcd->name, utsname()->nodename, len);
- vlcd = (struct vnic_login_client_data *)((char *)&vlcd->name + len);
+ strncpy(vlcd->name, utsname()->nodename, len);
+ vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
/* Type 3 - device name */
vlcd->type = 3;
len = strlen(adapter->netdev->name) + 1;
vlcd->len = cpu_to_be16(len);
- strncpy(&vlcd->name, adapter->netdev->name, len);
+ strncpy(vlcd->name, adapter->netdev->name, len);
}
static int send_login(struct ibmvnic_adapter *adapter)
@@ -3942,7 +3965,7 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
* to resend the login buffer with fewer queues requested.
*/
if (login_rsp_crq->generic.rc.code) {
- adapter->renegotiate = true;
+ adapter->init_done_rc = login_rsp_crq->generic.rc.code;
complete(&adapter->init_done);
return 0;
}
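
The reworked ibmvnic_login() folds renegotiation into a single loop: a PARTIALSUCCESS login response triggers sub-CRQ release and capability re-query, bounded by a retry cap. A minimal sketch of that control flow, with a simulated firmware response and hypothetical constant values:

#include <stdio.h>

#define MAX_RETRIES	10	/* stand-in for IBMVNIC_MAX_QUEUES */
#define PARTIALSUCCESS	2	/* hypothetical rc value */

static int fake_login(int attempt)
{
	/* Pretend firmware asks us to renegotiate twice, then accepts */
	return attempt < 3 ? PARTIALSUCCESS : 0;
}

int main(void)
{
	int retry_count = 0, rc, attempt = 1;

	do {
		if (retry_count > MAX_RETRIES) {
			fprintf(stderr, "login attempts exceeded\n");
			return 1;
		}
		rc = fake_login(attempt++);
		if (rc == PARTIALSUCCESS) {
			retry_count++;
			printf("partial success: renegotiating queues\n");
		} else if (rc) {
			fprintf(stderr, "login failed, rc=%d\n", rc);
			return 1;
		}
	} while (rc == PARTIALSUCCESS);

	printf("login complete after %d retries\n", retry_count);
	return 0;
}
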
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 99c0b58c2c39..22391e8805f6 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -1035,7 +1035,6 @@ struct ibmvnic_adapter {
struct ibmvnic_sub_crq_queue **tx_scrq;
struct ibmvnic_sub_crq_queue **rx_scrq;
- bool renegotiate;
/* rx structs */
struct napi_struct *napi;
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 5b13ca1bd85f..7dc5f045e969 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -586,7 +586,7 @@ struct ice_sw_rule_lg_act {
#define ICE_LG_ACT_MIRROR_VSI_ID_S 3
#define ICE_LG_ACT_MIRROR_VSI_ID_M (0x3FF << ICE_LG_ACT_MIRROR_VSI_ID_S)
- /* Action type = 5 - Large Action */
+ /* Action type = 5 - Generic Value */
#define ICE_LG_ACT_GENERIC 0x5
#define ICE_LG_ACT_GENERIC_VALUE_S 3
#define ICE_LG_ACT_GENERIC_VALUE_M (0xFFFF << ICE_LG_ACT_GENERIC_VALUE_S)
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 21977ec984c4..71d032cc5fa7 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -78,6 +78,7 @@ ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
struct ice_aq_desc desc;
enum ice_status status;
u16 flags;
+ u8 i;
cmd = &desc.params.mac_read;
@@ -98,8 +99,16 @@ ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
return ICE_ERR_CFG;
}
- ether_addr_copy(hw->port_info->mac.lan_addr, resp->mac_addr);
- ether_addr_copy(hw->port_info->mac.perm_addr, resp->mac_addr);
+ /* A single port can report up to two (LAN and WoL) addresses */
+ for (i = 0; i < cmd->num_addr; i++)
+ if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
+ ether_addr_copy(hw->port_info->mac.lan_addr,
+ resp[i].mac_addr);
+ ether_addr_copy(hw->port_info->mac.perm_addr,
+ resp[i].mac_addr);
+ break;
+ }
+
return 0;
}
@@ -464,9 +473,12 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
if (status)
goto err_unroll_sched;
- /* Get port MAC information */
- mac_buf_len = sizeof(struct ice_aqc_manage_mac_read_resp);
- mac_buf = devm_kzalloc(ice_hw_to_dev(hw), mac_buf_len, GFP_KERNEL);
+ /* Get MAC information */
+ /* A single port can report up to two (LAN and WoL) addresses */
+ mac_buf = devm_kcalloc(ice_hw_to_dev(hw), 2,
+ sizeof(struct ice_aqc_manage_mac_read_resp),
+ GFP_KERNEL);
+ mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);
if (!mac_buf) {
status = ICE_ERR_NO_MEMORY;
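
ice_aq_manage_mac_read() must now provide room for two response entries, since a port can report both a LAN and a WoL address, and then pick the LAN one out of the response array. A sketch of that scan, with hypothetical type codes in place of the ICE_AQC_MAN_MAC_ADDR_TYPE_* values:

#include <stdio.h>
#include <string.h>

#define ADDR_TYPE_LAN	0	/* hypothetical type codes */
#define ADDR_TYPE_WOL	1

struct mac_resp {
	unsigned char addr_type;
	unsigned char mac_addr[6];
};

int main(void)
{
	/* A single port can report up to two (LAN and WoL) addresses */
	struct mac_resp resp[2] = {
		{ ADDR_TYPE_WOL, { 0x00, 0x1b, 0x21, 0x00, 0x00, 0x01 } },
		{ ADDR_TYPE_LAN, { 0x00, 0x1b, 0x21, 0x00, 0x00, 0x02 } },
	};
	unsigned char lan_addr[6] = { 0 };
	int i;

	for (i = 0; i < 2; i++)
		if (resp[i].addr_type == ADDR_TYPE_LAN) {
			memcpy(lan_addr, resp[i].mac_addr, 6);
			break;
		}

	printf("LAN MAC: %02x:%02x:%02x:%02x:%02x:%02x\n",
	       lan_addr[0], lan_addr[1], lan_addr[2],
	       lan_addr[3], lan_addr[4], lan_addr[5]);
	return 0;
}
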
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index 1b9e2ef48a9d..499904874b3f 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -121,8 +121,6 @@
#define PFINT_FW_CTL_CAUSE_ENA_S 30
#define PFINT_FW_CTL_CAUSE_ENA_M BIT(PFINT_FW_CTL_CAUSE_ENA_S)
#define PFINT_OICR 0x0016CA00
-#define PFINT_OICR_INTEVENT_S 0
-#define PFINT_OICR_INTEVENT_M BIT(PFINT_OICR_INTEVENT_S)
#define PFINT_OICR_HLP_RDY_S 14
#define PFINT_OICR_HLP_RDY_M BIT(PFINT_OICR_HLP_RDY_S)
#define PFINT_OICR_CPM_RDY_S 15
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 210b7910f1cd..5299caf55a7f 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -1722,9 +1722,6 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
oicr = rd32(hw, PFINT_OICR);
ena_mask = rd32(hw, PFINT_OICR_ENA);
- if (!(oicr & PFINT_OICR_INTEVENT_M))
- goto ena_intr;
-
if (oicr & PFINT_OICR_GRST_M) {
u32 reset;
/* we have a reset warning */
@@ -1782,7 +1779,6 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
}
ret = IRQ_HANDLED;
-ena_intr:
/* re-enable interrupt causes that are not handled during this pass */
wr32(hw, PFINT_OICR_ENA, ena_mask);
if (!test_bit(__ICE_DOWN, pf->state)) {
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index f16ff3e4a840..2e6c1d92cc88 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -751,14 +751,14 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
u16 num_added = 0;
u32 temp;
+ *num_nodes_added = 0;
+
if (!num_nodes)
return status;
if (!parent || layer < hw->sw_entry_point_layer)
return ICE_ERR_PARAM;
- *num_nodes_added = 0;
-
/* max children per node per layer */
max_child_nodes =
le16_to_cpu(hw->layer_info[parent->tx_sched_layer].max_children);
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index c1c0bc30a16d..cce7ada89255 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -1700,7 +1700,22 @@ static void igb_configure_cbs(struct igb_adapter *adapter, int queue,
WARN_ON(hw->mac.type != e1000_i210);
WARN_ON(queue < 0 || queue > 1);
- if (enable) {
+ if (enable || queue == 0) {
+ /* i210 does not allow the queue 0 to be in the Strict
+ * Priority mode while the Qav mode is enabled, so,
+ * instead of disabling strict priority mode, we give
+ * queue 0 the maximum of credits possible.
+ *
+ * See section 8.12.19 of the i210 datasheet, "Note:
+ * Queue0 QueueMode must be set to 1b when
+ * TransmitMode is set to Qav."
+ */
+ if (queue == 0 && !enable) {
+ /* max "linkspeed" idleslope in kbps */
+ idleslope = 1000000;
+ hicredit = ETH_FRAME_LEN;
+ }
+
set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_HIGH);
set_queue_mode(hw, queue, QUEUE_MODE_STREAM_RESERVATION);
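
Since the i210 forbids strict priority on queue 0 while Qav transmit mode is enabled, "disabling" CBS on queue 0 instead programs the maximum credits: a link-speed idleslope and an ETH_FRAME_LEN hicredit. A sketch of that parameter selection, using the values from the hunk above:

#include <stdio.h>
#include <stdbool.h>

#define ETH_FRAME_LEN 1514

static void pick_cbs_params(int queue, bool enable,
			    int *idleslope, int *hicredit)
{
	if (queue == 0 && !enable) {
		/* Queue 0 can't leave stream reservation mode under
		 * Qav, so give it effectively unlimited credits.
		 */
		*idleslope = 1000000;	/* max "linkspeed" in kbps */
		*hicredit = ETH_FRAME_LEN;
	}
}

int main(void)
{
	int idleslope = 0, hicredit = 0;

	pick_cbs_params(0, false, &idleslope, &hicredit);
	printf("queue 0 'disabled': idleslope=%d kbps hicredit=%d\n",
	       idleslope, hicredit);
	return 0;
}
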
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 3d9033f26eff..e3d04f226d57 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -3420,7 +3420,7 @@ static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
if (!err)
continue;
hw_dbg(&adapter->hw, "Allocation for XDP Queue %u failed\n", j);
- break;
+ goto err_setup_tx;
}
return 0;
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index 54a038943c06..4202f9b5b966 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -663,7 +663,7 @@ enum mvpp2_tag_type {
#define MVPP2_PE_VID_FILT_RANGE_END (MVPP2_PRS_TCAM_SRAM_SIZE - 31)
#define MVPP2_PE_VID_FILT_RANGE_START (MVPP2_PE_VID_FILT_RANGE_END - \
MVPP2_PRS_VLAN_FILT_RANGE_SIZE + 1)
-#define MVPP2_PE_LAST_FREE_TID (MVPP2_PE_VID_FILT_RANGE_START - 1)
+#define MVPP2_PE_LAST_FREE_TID (MVPP2_PE_MAC_RANGE_START - 1)
#define MVPP2_PE_IP6_EXT_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 30)
#define MVPP2_PE_IP6_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 29)
#define MVPP2_PE_IP4_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 28)
@@ -916,6 +916,8 @@ static struct {
#define MVPP2_MIB_COUNTERS_STATS_DELAY (1 * HZ)
+#define MVPP2_DESC_DMA_MASK DMA_BIT_MASK(40)
+
/* Definitions */
/* Shared Packet Processor resources */
@@ -1429,7 +1431,7 @@ static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port,
if (port->priv->hw_version == MVPP21)
return tx_desc->pp21.buf_dma_addr;
else
- return tx_desc->pp22.buf_dma_addr_ptp & GENMASK_ULL(40, 0);
+ return tx_desc->pp22.buf_dma_addr_ptp & MVPP2_DESC_DMA_MASK;
}
static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
@@ -1447,7 +1449,7 @@ static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
} else {
u64 val = (u64)addr;
- tx_desc->pp22.buf_dma_addr_ptp &= ~GENMASK_ULL(40, 0);
+ tx_desc->pp22.buf_dma_addr_ptp &= ~MVPP2_DESC_DMA_MASK;
tx_desc->pp22.buf_dma_addr_ptp |= val;
tx_desc->pp22.packet_offset = offset;
}
@@ -1507,7 +1509,7 @@ static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
if (port->priv->hw_version == MVPP21)
return rx_desc->pp21.buf_dma_addr;
else
- return rx_desc->pp22.buf_dma_addr_key_hash & GENMASK_ULL(40, 0);
+ return rx_desc->pp22.buf_dma_addr_key_hash & MVPP2_DESC_DMA_MASK;
}
static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
@@ -1516,7 +1518,7 @@ static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
if (port->priv->hw_version == MVPP21)
return rx_desc->pp21.buf_cookie;
else
- return rx_desc->pp22.buf_cookie_misc & GENMASK_ULL(40, 0);
+ return rx_desc->pp22.buf_cookie_misc & MVPP2_DESC_DMA_MASK;
}
static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
@@ -8789,7 +8791,7 @@ static int mvpp2_probe(struct platform_device *pdev)
}
if (priv->hw_version == MVPP22) {
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
+ err = dma_set_mask(&pdev->dev, MVPP2_DESC_DMA_MASK);
if (err)
goto err_mg_clk;
/* Sadly, the BM pools all share the same register to
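
Worth noting what the named macro changes: GENMASK_ULL(40, 0) covers bits 0 through 40 (41 bits), while DMA_BIT_MASK(40) is (1ULL << 40) - 1 (40 bits), so unifying on MVPP2_DESC_DMA_MASK also narrows the descriptor masks by one bit. A quick standalone check of the arithmetic, using the same definitions the kernel headers use for these argument ranges:

#include <stdio.h>

/* Equivalent to the kernel definitions for n < 64 and h < 63 */
#define DMA_BIT_MASK(n)		((1ULL << (n)) - 1)
#define GENMASK_ULL(h, l)	\
	(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

int main(void)
{
	printf("GENMASK_ULL(40, 0) = %#llx\n", GENMASK_ULL(40, 0));
	printf("DMA_BIT_MASK(40)   = %#llx\n", DMA_BIT_MASK(40));
	return 0;
}
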
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
index 3735c09d2112..577659f332e4 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
@@ -258,9 +258,6 @@ nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb)
case NFP_FLOWER_CMSG_TYPE_ACTIVE_TUNS:
nfp_tunnel_keep_alive(app, skb);
break;
- case NFP_FLOWER_CMSG_TYPE_TUN_NEIGH:
- /* Acks from the NFP that the route is added - ignore. */
- break;
default:
nfp_flower_cmsg_warn(app, "Cannot handle invalid repr control type %u\n",
type);
@@ -275,18 +272,49 @@ out:
void nfp_flower_cmsg_process_rx(struct work_struct *work)
{
+ struct sk_buff_head cmsg_joined;
struct nfp_flower_priv *priv;
struct sk_buff *skb;
priv = container_of(work, struct nfp_flower_priv, cmsg_work);
+ skb_queue_head_init(&cmsg_joined);
+
+ spin_lock_bh(&priv->cmsg_skbs_high.lock);
+ skb_queue_splice_tail_init(&priv->cmsg_skbs_high, &cmsg_joined);
+ spin_unlock_bh(&priv->cmsg_skbs_high.lock);
- while ((skb = skb_dequeue(&priv->cmsg_skbs)))
+ spin_lock_bh(&priv->cmsg_skbs_low.lock);
+ skb_queue_splice_tail_init(&priv->cmsg_skbs_low, &cmsg_joined);
+ spin_unlock_bh(&priv->cmsg_skbs_low.lock);
+
+ while ((skb = __skb_dequeue(&cmsg_joined)))
nfp_flower_cmsg_process_one_rx(priv->app, skb);
}
-void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb)
+static void
+nfp_flower_queue_ctl_msg(struct nfp_app *app, struct sk_buff *skb, int type)
{
struct nfp_flower_priv *priv = app->priv;
+ struct sk_buff_head *skb_head;
+
+ if (type == NFP_FLOWER_CMSG_TYPE_PORT_REIFY ||
+ type == NFP_FLOWER_CMSG_TYPE_PORT_MOD)
+ skb_head = &priv->cmsg_skbs_high;
+ else
+ skb_head = &priv->cmsg_skbs_low;
+
+ if (skb_queue_len(skb_head) >= NFP_FLOWER_WORKQ_MAX_SKBS) {
+ nfp_flower_cmsg_warn(app, "Dropping queued control messages\n");
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ skb_queue_tail(skb_head, skb);
+ schedule_work(&priv->cmsg_work);
+}
+
+void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb)
+{
struct nfp_flower_cmsg_hdr *cmsg_hdr;
cmsg_hdr = nfp_flower_cmsg_get_hdr(skb);
@@ -306,8 +334,10 @@ void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb)
nfp_flower_process_mtu_ack(app, skb)) {
/* Handle MTU acks outside wq to prevent RTNL conflict. */
dev_consume_skb_any(skb);
+ } else if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH) {
+ /* Acks from the NFP that the route is added - ignore. */
+ dev_consume_skb_any(skb);
} else {
- skb_queue_tail(&priv->cmsg_skbs, skb);
- schedule_work(&priv->cmsg_work);
+ nfp_flower_queue_ctl_msg(app, skb, cmsg_hdr->type);
}
}
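
Control messages are now split into a high-priority queue (port reify and port mod) and a low-priority queue, each bounded, and the worker splices both into one list with the high-priority entries first. A small sketch of the classification and bound check, with plain counters standing in for sk_buff lists and hypothetical type codes:

#include <stdio.h>

#define TYPE_PORT_MOD	2	/* hypothetical message type codes */
#define TYPE_PORT_REIFY	6
#define TYPE_FLOW_STATS	9
#define MAX_QUEUED	4	/* stand-in for the 30000-skb cap */

struct queue {
	const char *name;
	int len;
};

static void enqueue(struct queue *high, struct queue *low, int type)
{
	struct queue *q = (type == TYPE_PORT_REIFY ||
			   type == TYPE_PORT_MOD) ? high : low;

	if (q->len >= MAX_QUEUED) {
		printf("dropping message type %d (%s queue full)\n",
		       type, q->name);
		return;
	}
	q->len++;
	printf("queued type %d on %s (depth %d)\n", type, q->name, q->len);
}

int main(void)
{
	struct queue high = { "high", 0 }, low = { "low", 0 };
	int i;

	enqueue(&high, &low, TYPE_PORT_MOD);
	for (i = 0; i < 5; i++)
		enqueue(&high, &low, TYPE_FLOW_STATS);
	return 0;
}
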
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
index 96bc0e33980c..b6c0fd053a50 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
@@ -108,6 +108,8 @@
#define NFP_FL_IPV4_TUNNEL_TYPE GENMASK(7, 4)
#define NFP_FL_IPV4_PRE_TUN_INDEX GENMASK(2, 0)
+#define NFP_FLOWER_WORKQ_MAX_SKBS 30000
+
#define nfp_flower_cmsg_warn(app, fmt, args...) \
do { \
if (net_ratelimit()) \
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c
index 6357e0720f43..ad02592a82b7 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.c
@@ -519,7 +519,8 @@ static int nfp_flower_init(struct nfp_app *app)
app->priv = app_priv;
app_priv->app = app;
- skb_queue_head_init(&app_priv->cmsg_skbs);
+ skb_queue_head_init(&app_priv->cmsg_skbs_high);
+ skb_queue_head_init(&app_priv->cmsg_skbs_low);
INIT_WORK(&app_priv->cmsg_work, nfp_flower_cmsg_process_rx);
init_waitqueue_head(&app_priv->reify_wait_queue);
@@ -549,7 +550,8 @@ static void nfp_flower_clean(struct nfp_app *app)
{
struct nfp_flower_priv *app_priv = app->priv;
- skb_queue_purge(&app_priv->cmsg_skbs);
+ skb_queue_purge(&app_priv->cmsg_skbs_high);
+ skb_queue_purge(&app_priv->cmsg_skbs_low);
flush_work(&app_priv->cmsg_work);
nfp_flower_metadata_cleanup(app);
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h
index e030b3ce4510..c67e1b54c614 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -107,7 +107,10 @@ struct nfp_mtu_conf {
* @mask_table: Hash table used to store masks
* @flow_table: Hash table used to store flower rules
* @cmsg_work: Workqueue for control messages processing
- * @cmsg_skbs: List of skbs for control message processing
+ * @cmsg_skbs_high: List of higher priority skbs for control message
+ * processing
+ * @cmsg_skbs_low: List of lower priority skbs for control message
+ * processing
* @nfp_mac_off_list: List of MAC addresses to offload
* @nfp_mac_index_list: List of unique 8-bit indexes for non NFP netdevs
* @nfp_ipv4_off_list: List of IPv4 addresses to offload
@@ -136,7 +139,8 @@ struct nfp_flower_priv {
DECLARE_HASHTABLE(mask_table, NFP_FLOWER_MASK_HASH_BITS);
DECLARE_HASHTABLE(flow_table, NFP_FLOWER_HASH_BITS);
struct work_struct cmsg_work;
- struct sk_buff_head cmsg_skbs;
+ struct sk_buff_head cmsg_skbs_high;
+ struct sk_buff_head cmsg_skbs_low;
struct list_head nfp_mac_off_list;
struct list_head nfp_mac_index_list;
struct list_head nfp_ipv4_off_list;
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c
index f7b958181126..cb28ac03e4ca 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c
@@ -211,8 +211,11 @@ int nfp_cpp_mutex_lock(struct nfp_cpp_mutex *mutex)
break;
err = msleep_interruptible(timeout_ms);
- if (err != 0)
+ if (err != 0) {
+ nfp_info(mutex->cpp,
+ "interrupted waiting for NFP mutex\n");
return -ERESTARTSYS;
+ }
if (time_is_before_eq_jiffies(warn_at)) {
warn_at = jiffies + NFP_MUTEX_WAIT_NEXT_WARN * HZ;
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
index 99bb679a9801..2abee0fe3a7c 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
@@ -281,8 +281,7 @@ nfp_nsp_wait_reg(struct nfp_cpp *cpp, u64 *reg, u32 nsp_cpp, u64 addr,
if ((*reg & mask) == val)
return 0;
- if (msleep_interruptible(25))
- return -ERESTARTSYS;
+ msleep(25);
if (time_after(start_time, wait_until))
return -ETIMEDOUT;
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
index d33988570217..5f4e447c5dce 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
@@ -350,15 +350,16 @@ static int rmnet_fill_info(struct sk_buff *skb, const struct net_device *dev)
real_dev = priv->real_dev;
- if (!rmnet_is_real_dev_registered(real_dev))
- return -ENODEV;
-
if (nla_put_u16(skb, IFLA_RMNET_MUX_ID, priv->mux_id))
goto nla_put_failure;
- port = rmnet_get_port_rtnl(real_dev);
+ if (rmnet_is_real_dev_registered(real_dev)) {
+ port = rmnet_get_port_rtnl(real_dev);
+ f.flags = port->data_format;
+ } else {
+ f.flags = 0;
+ }
- f.flags = port->data_format;
f.mask = ~0;
if (nla_put(skb, IFLA_RMNET_FLAGS, sizeof(f), &f))
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 50daad0a1482..63036d9bf3e6 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -3999,29 +3999,6 @@ static void efx_ef10_prepare_flr(struct efx_nic *efx)
atomic_set(&efx->active_queues, 0);
}
-static bool efx_ef10_filter_equal(const struct efx_filter_spec *left,
- const struct efx_filter_spec *right)
-{
- if ((left->match_flags ^ right->match_flags) |
- ((left->flags ^ right->flags) &
- (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)))
- return false;
-
- return memcmp(&left->outer_vid, &right->outer_vid,
- sizeof(struct efx_filter_spec) -
- offsetof(struct efx_filter_spec, outer_vid)) == 0;
-}
-
-static unsigned int efx_ef10_filter_hash(const struct efx_filter_spec *spec)
-{
- BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3);
- return jhash2((const u32 *)&spec->outer_vid,
- (sizeof(struct efx_filter_spec) -
- offsetof(struct efx_filter_spec, outer_vid)) / 4,
- 0);
- /* XXX should we randomise the initval? */
-}
-
/* Decide whether a filter should be exclusive or else should allow
* delivery to additional recipients. Currently we decide that
* filters for specific local unicast MAC and IP addresses are
@@ -4346,7 +4323,7 @@ static s32 efx_ef10_filter_insert(struct efx_nic *efx,
goto out_unlock;
match_pri = rc;
- hash = efx_ef10_filter_hash(spec);
+ hash = efx_filter_spec_hash(spec);
is_mc_recip = efx_filter_is_mc_recipient(spec);
if (is_mc_recip)
bitmap_zero(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
@@ -4378,7 +4355,7 @@ static s32 efx_ef10_filter_insert(struct efx_nic *efx,
if (!saved_spec) {
if (ins_index < 0)
ins_index = i;
- } else if (efx_ef10_filter_equal(spec, saved_spec)) {
+ } else if (efx_filter_spec_equal(spec, saved_spec)) {
if (spec->priority < saved_spec->priority &&
spec->priority != EFX_FILTER_PRI_AUTO) {
rc = -EPERM;
@@ -4762,28 +4739,62 @@ static s32 efx_ef10_filter_get_rx_ids(struct efx_nic *efx,
static bool efx_ef10_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
unsigned int filter_idx)
{
+ struct efx_filter_spec *spec, saved_spec;
struct efx_ef10_filter_table *table;
- struct efx_filter_spec *spec;
- bool ret;
+ struct efx_arfs_rule *rule = NULL;
+ bool ret = true, force = false;
+ u16 arfs_id;
down_read(&efx->filter_sem);
table = efx->filter_state;
down_write(&table->lock);
spec = efx_ef10_filter_entry_spec(table, filter_idx);
- if (!spec || spec->priority != EFX_FILTER_PRI_HINT) {
- ret = true;
+ if (!spec || spec->priority != EFX_FILTER_PRI_HINT)
goto out_unlock;
- }
- if (!rps_may_expire_flow(efx->net_dev, spec->dmaq_id,
- flow_id, filter_idx)) {
- ret = false;
- goto out_unlock;
+ spin_lock_bh(&efx->rps_hash_lock);
+ if (!efx->rps_hash_table) {
+ /* In the absence of the table, we always return 0 to ARFS. */
+ arfs_id = 0;
+ } else {
+ rule = efx_rps_hash_find(efx, spec);
+ if (!rule)
+ /* ARFS table doesn't know of this filter, so remove it */
+ goto expire;
+ arfs_id = rule->arfs_id;
+ ret = efx_rps_check_rule(rule, filter_idx, &force);
+ if (force)
+ goto expire;
+ if (!ret) {
+ spin_unlock_bh(&efx->rps_hash_lock);
+ goto out_unlock;
+ }
}
-
+ if (!rps_may_expire_flow(efx->net_dev, spec->dmaq_id, flow_id, arfs_id))
+ ret = false;
+ else if (rule)
+ rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING;
+expire:
+ saved_spec = *spec; /* remove operation will kfree spec */
+ spin_unlock_bh(&efx->rps_hash_lock);
+ /* At this point (since we dropped the lock), another thread might queue
+ * up a fresh insertion request (but the actual insertion will be held
+ * up by our possession of the filter table lock). In that case, it
+ * will set rule->filter_id to EFX_ARFS_FILTER_ID_PENDING, meaning that
+ * the rule is not removed by efx_rps_hash_del() below.
+ */
ret = efx_ef10_filter_remove_internal(efx, 1U << spec->priority,
filter_idx, true) == 0;
+ /* While we can't safely dereference rule (we dropped the lock), we can
+ * still test it for NULL.
+ */
+ if (ret && rule) {
+ /* Expiring, so remove entry from ARFS table */
+ spin_lock_bh(&efx->rps_hash_lock);
+ efx_rps_hash_del(efx, &saved_spec);
+ spin_unlock_bh(&efx->rps_hash_lock);
+ }
out_unlock:
up_write(&table->lock);
up_read(&efx->filter_sem);
@@ -5265,7 +5276,7 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
ids = vlan->uc;
}
- filter_flags = efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0;
+ filter_flags = efx_rss_active(&efx->rss_context) ? EFX_FILTER_FLAG_RX_RSS : 0;
/* Insert/renew filters */
for (i = 0; i < addr_count; i++) {
@@ -5334,7 +5345,7 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx,
int rc;
u16 *id;
- filter_flags = efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0;
+ filter_flags = efx_rss_active(&efx->rss_context) ? EFX_FILTER_FLAG_RX_RSS : 0;
efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 692dd729ee2a..a4ebd8715494 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -3027,6 +3027,10 @@ static int efx_init_struct(struct efx_nic *efx,
mutex_init(&efx->mac_lock);
#ifdef CONFIG_RFS_ACCEL
mutex_init(&efx->rps_mutex);
+ spin_lock_init(&efx->rps_hash_lock);
+ /* Failure to allocate is not fatal, but may degrade ARFS performance */
+ efx->rps_hash_table = kcalloc(EFX_ARFS_HASH_TABLE_SIZE,
+ sizeof(*efx->rps_hash_table), GFP_KERNEL);
#endif
efx->phy_op = &efx_dummy_phy_operations;
efx->mdio.dev = net_dev;
@@ -3070,6 +3074,10 @@ static void efx_fini_struct(struct efx_nic *efx)
{
int i;
+#ifdef CONFIG_RFS_ACCEL
+ kfree(efx->rps_hash_table);
+#endif
+
for (i = 0; i < EFX_MAX_CHANNELS; i++)
kfree(efx->channel[i]);
@@ -3092,6 +3100,141 @@ void efx_update_sw_stats(struct efx_nic *efx, u64 *stats)
stats[GENERIC_STAT_rx_noskb_drops] = atomic_read(&efx->n_rx_noskb_drops);
}
+bool efx_filter_spec_equal(const struct efx_filter_spec *left,
+ const struct efx_filter_spec *right)
+{
+ if ((left->match_flags ^ right->match_flags) |
+ ((left->flags ^ right->flags) &
+ (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)))
+ return false;
+
+ return memcmp(&left->outer_vid, &right->outer_vid,
+ sizeof(struct efx_filter_spec) -
+ offsetof(struct efx_filter_spec, outer_vid)) == 0;
+}
+
+u32 efx_filter_spec_hash(const struct efx_filter_spec *spec)
+{
+ BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3);
+ return jhash2((const u32 *)&spec->outer_vid,
+ (sizeof(struct efx_filter_spec) -
+ offsetof(struct efx_filter_spec, outer_vid)) / 4,
+ 0);
+}
+
+#ifdef CONFIG_RFS_ACCEL
+bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx,
+ bool *force)
+{
+ if (rule->filter_id == EFX_ARFS_FILTER_ID_PENDING) {
+ /* ARFS is currently updating this entry, leave it */
+ return false;
+ }
+ if (rule->filter_id == EFX_ARFS_FILTER_ID_ERROR) {
+ /* ARFS tried and failed to update this, so it's probably out
+ * of date. Remove the filter and the ARFS rule entry.
+ */
+ rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING;
+ *force = true;
+ return true;
+ } else if (WARN_ON(rule->filter_id != filter_idx)) { /* can't happen */
+ /* ARFS has moved on, so old filter is not needed. Since we did
+ * not mark the rule with EFX_ARFS_FILTER_ID_REMOVING, it will
+ * not be removed by efx_rps_hash_del() subsequently.
+ */
+ *force = true;
+ return true;
+ }
+ /* Remove it iff ARFS wants to. */
+ return true;
+}
+
+struct hlist_head *efx_rps_hash_bucket(struct efx_nic *efx,
+ const struct efx_filter_spec *spec)
+{
+ u32 hash = efx_filter_spec_hash(spec);
+
+ WARN_ON(!spin_is_locked(&efx->rps_hash_lock));
+ if (!efx->rps_hash_table)
+ return NULL;
+ return &efx->rps_hash_table[hash % EFX_ARFS_HASH_TABLE_SIZE];
+}
+
+struct efx_arfs_rule *efx_rps_hash_find(struct efx_nic *efx,
+ const struct efx_filter_spec *spec)
+{
+ struct efx_arfs_rule *rule;
+ struct hlist_head *head;
+ struct hlist_node *node;
+
+ head = efx_rps_hash_bucket(efx, spec);
+ if (!head)
+ return NULL;
+ hlist_for_each(node, head) {
+ rule = container_of(node, struct efx_arfs_rule, node);
+ if (efx_filter_spec_equal(spec, &rule->spec))
+ return rule;
+ }
+ return NULL;
+}
+
+struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx,
+ const struct efx_filter_spec *spec,
+ bool *new)
+{
+ struct efx_arfs_rule *rule;
+ struct hlist_head *head;
+ struct hlist_node *node;
+
+ head = efx_rps_hash_bucket(efx, spec);
+ if (!head)
+ return NULL;
+ hlist_for_each(node, head) {
+ rule = container_of(node, struct efx_arfs_rule, node);
+ if (efx_filter_spec_equal(spec, &rule->spec)) {
+ *new = false;
+ return rule;
+ }
+ }
+ rule = kmalloc(sizeof(*rule), GFP_ATOMIC);
+ *new = true;
+ if (rule) {
+ memcpy(&rule->spec, spec, sizeof(rule->spec));
+ hlist_add_head(&rule->node, head);
+ }
+ return rule;
+}
+
+void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec)
+{
+ struct efx_arfs_rule *rule;
+ struct hlist_head *head;
+ struct hlist_node *node;
+
+ head = efx_rps_hash_bucket(efx, spec);
+ if (WARN_ON(!head))
+ return;
+ hlist_for_each(node, head) {
+ rule = container_of(node, struct efx_arfs_rule, node);
+ if (efx_filter_spec_equal(spec, &rule->spec)) {
+ /* Someone already reused the entry. We know that if
+ * this check doesn't fire (i.e. filter_id == REMOVING)
+ * then the REMOVING mark was put there by our caller,
+ * because caller is holding a lock on filter table and
+ * only holders of that lock set REMOVING.
+ */
+ if (rule->filter_id != EFX_ARFS_FILTER_ID_REMOVING)
+ return;
+ hlist_del(node);
+ kfree(rule);
+ return;
+ }
+ }
+ /* We didn't find it. */
+ WARN_ON(1);
+}
+#endif
+
/* RSS contexts. We're using linked lists and crappy O(n) algorithms, because
* (a) this is an infrequent control-plane operation and (b) n is small (max 64)
*/
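
The new ARFS bookkeeping hashes each filter spec into a fixed 512-bucket table and does a find-or-add under the rps_hash_lock, with efx_rps_hash_add() reporting through *new whether the entry was created. A simplified userspace sketch of that pattern, using integer keys in place of filter specs and a plain singly linked list per bucket:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define TABLE_SIZE 8	/* stand-in for EFX_ARFS_HASH_TABLE_SIZE */

struct rule {
	unsigned int key;	/* stands in for the filter spec */
	struct rule *next;
};

static struct rule *buckets[TABLE_SIZE];

static struct rule *hash_add(unsigned int key, bool *new)
{
	struct rule *r;

	for (r = buckets[key % TABLE_SIZE]; r; r = r->next)
		if (r->key == key) {
			*new = false;	/* existing entry reused */
			return r;
		}

	r = malloc(sizeof(*r));
	*new = true;
	if (r) {
		r->key = key;
		r->next = buckets[key % TABLE_SIZE];
		buckets[key % TABLE_SIZE] = r;
	}
	return r;
}

int main(void)
{
	bool new;

	hash_add(42, &new);
	printf("first add: new=%d\n", new);
	hash_add(42, &new);
	printf("second add: new=%d\n", new);
	return 0;
}
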
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index a3140e16fcef..3f759ebdcf10 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -186,6 +186,27 @@ static inline void efx_filter_rfs_expire(struct work_struct *data) {}
#endif
bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec);
+bool efx_filter_spec_equal(const struct efx_filter_spec *left,
+ const struct efx_filter_spec *right);
+u32 efx_filter_spec_hash(const struct efx_filter_spec *spec);
+
+#ifdef CONFIG_RFS_ACCEL
+bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx,
+ bool *force);
+
+struct efx_arfs_rule *efx_rps_hash_find(struct efx_nic *efx,
+ const struct efx_filter_spec *spec);
+
+/* @new is written to indicate if entry was newly added (true) or if an old
+ * entry was found and returned (false).
+ */
+struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx,
+ const struct efx_filter_spec *spec,
+ bool *new);
+
+void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec);
+#endif
+
/* RSS contexts */
struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx);
struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id);
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index 4a19c7efdf8d..c72adf8b52ea 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -2905,18 +2905,45 @@ bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
{
struct efx_farch_filter_state *state = efx->filter_state;
struct efx_farch_filter_table *table;
- bool ret = false;
+ bool ret = false, force = false;
+ u16 arfs_id;
down_write(&state->lock);
+ spin_lock_bh(&efx->rps_hash_lock);
table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
if (test_bit(index, table->used_bitmap) &&
- table->spec[index].priority == EFX_FILTER_PRI_HINT &&
- rps_may_expire_flow(efx->net_dev, table->spec[index].dmaq_id,
- flow_id, index)) {
- efx_farch_filter_table_clear_entry(efx, table, index);
- ret = true;
+ table->spec[index].priority == EFX_FILTER_PRI_HINT) {
+ struct efx_arfs_rule *rule = NULL;
+ struct efx_filter_spec spec;
+
+ efx_farch_filter_to_gen_spec(&spec, &table->spec[index]);
+ if (!efx->rps_hash_table) {
+ /* In the absence of the table, we always returned 0 to
+ * ARFS, so use the same to query it.
+ */
+ arfs_id = 0;
+ } else {
+ rule = efx_rps_hash_find(efx, &spec);
+ if (!rule) {
+ /* ARFS table doesn't know of this filter, remove it */
+ force = true;
+ } else {
+ arfs_id = rule->arfs_id;
+ if (!efx_rps_check_rule(rule, index, &force))
+ goto out_unlock;
+ }
+ }
+ if (force || rps_may_expire_flow(efx->net_dev, spec.dmaq_id,
+ flow_id, arfs_id)) {
+ if (rule)
+ rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING;
+ efx_rps_hash_del(efx, &spec);
+ efx_farch_filter_table_clear_entry(efx, table, index);
+ ret = true;
+ }
}
-
+out_unlock:
+ spin_unlock_bh(&efx->rps_hash_lock);
up_write(&state->lock);
return ret;
}
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 5e379a83c729..65568925c3ef 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -733,6 +733,56 @@ struct efx_rss_context {
u32 rx_indir_table[128];
};
+#ifdef CONFIG_RFS_ACCEL
+/* Order of these is important, since filter_id >= %EFX_ARFS_FILTER_ID_PENDING
+ * is used to test if filter does or will exist.
+ */
+#define EFX_ARFS_FILTER_ID_PENDING -1
+#define EFX_ARFS_FILTER_ID_ERROR -2
+#define EFX_ARFS_FILTER_ID_REMOVING -3
+/**
+ * struct efx_arfs_rule - record of an ARFS filter and its IDs
+ * @node: linkage into hash table
+ * @spec: details of the filter (used as key for hash table). Use efx->type to
+ * determine which member to use.
+ * @rxq_index: channel to which the filter will steer traffic.
+ * @arfs_id: filter ID which was returned to ARFS
+ * @filter_id: index in software filter table. May be
+ * %EFX_ARFS_FILTER_ID_PENDING if filter was not inserted yet,
+ * %EFX_ARFS_FILTER_ID_ERROR if filter insertion failed, or
+ * %EFX_ARFS_FILTER_ID_REMOVING if expiry is currently removing the filter.
+ */
+struct efx_arfs_rule {
+ struct hlist_node node;
+ struct efx_filter_spec spec;
+ u16 rxq_index;
+ u16 arfs_id;
+ s32 filter_id;
+};
+
+/* Size chosen so that the table is one page (4kB) */
+#define EFX_ARFS_HASH_TABLE_SIZE 512
+
+/**
+ * struct efx_async_filter_insertion - Request to asynchronously insert a filter
+ * @net_dev: Reference to the netdevice
+ * @spec: The filter to insert
+ * @work: Workitem for this request
+ * @rxq_index: Identifies the channel for which this request was made
+ * @flow_id: Identifies the kernel-side flow for which this request was made
+ */
+struct efx_async_filter_insertion {
+ struct net_device *net_dev;
+ struct efx_filter_spec spec;
+ struct work_struct work;
+ u16 rxq_index;
+ u32 flow_id;
+};
+
+/* Maximum number of ARFS workitems that may be in flight on an efx_nic */
+#define EFX_RPS_MAX_IN_FLIGHT 8
+#endif /* CONFIG_RFS_ACCEL */
+
/**
* struct efx_nic - an Efx NIC
* @name: Device name (net device name or bus id before net device registered)
@@ -850,6 +900,12 @@ struct efx_rss_context {
* @rps_expire_channel: Next channel to check for expiry
* @rps_expire_index: Next index to check for expiry in
* @rps_expire_channel's @rps_flow_id
+ * @rps_slot_map: bitmap of in-flight entries in @rps_slot
+ * @rps_slot: array of ARFS insertion requests for efx_filter_rfs_work()
+ * @rps_hash_lock: Protects ARFS filter mapping state (@rps_hash_table and
+ * @rps_next_id).
+ * @rps_hash_table: Mapping between ARFS filters and their various IDs
+ * @rps_next_id: next arfs_id for an ARFS filter
* @active_queues: Count of RX and TX queues that haven't been flushed and drained.
* @rxq_flush_pending: Count of number of receive queues that need to be flushed.
* Decremented when the efx_flush_rx_queue() is called.
@@ -1004,6 +1060,11 @@ struct efx_nic {
struct mutex rps_mutex;
unsigned int rps_expire_channel;
unsigned int rps_expire_index;
+ unsigned long rps_slot_map;
+ struct efx_async_filter_insertion rps_slot[EFX_RPS_MAX_IN_FLIGHT];
+ spinlock_t rps_hash_lock;
+ struct hlist_head *rps_hash_table;
+ u32 rps_next_id;
#endif
atomic_t active_queues;
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 95682831484e..64a94f242027 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -827,31 +827,36 @@ MODULE_PARM_DESC(rx_refill_threshold,
#ifdef CONFIG_RFS_ACCEL
-/**
- * struct efx_async_filter_insertion - Request to asynchronously insert a filter
- * @net_dev: Reference to the netdevice
- * @spec: The filter to insert
- * @work: Workitem for this request
- * @rxq_index: Identifies the channel for which this request was made
- * @flow_id: Identifies the kernel-side flow for which this request was made
- */
-struct efx_async_filter_insertion {
- struct net_device *net_dev;
- struct efx_filter_spec spec;
- struct work_struct work;
- u16 rxq_index;
- u32 flow_id;
-};
-
static void efx_filter_rfs_work(struct work_struct *data)
{
struct efx_async_filter_insertion *req = container_of(data, struct efx_async_filter_insertion,
work);
struct efx_nic *efx = netdev_priv(req->net_dev);
struct efx_channel *channel = efx_get_channel(efx, req->rxq_index);
+ int slot_idx = req - efx->rps_slot;
+ struct efx_arfs_rule *rule;
+ u16 arfs_id = 0;
int rc;
- rc = efx->type->filter_insert(efx, &req->spec, false);
+ rc = efx->type->filter_insert(efx, &req->spec, true);
+ if (efx->rps_hash_table) {
+ spin_lock_bh(&efx->rps_hash_lock);
+ rule = efx_rps_hash_find(efx, &req->spec);
+ /* The rule might have already gone, if someone else's request
+ * for the same spec was already worked and then expired before
+ * we got around to our work. In that case we have nothing
+ * tying us to an arfs_id, meaning that as soon as the filter
+ * is considered for expiry it will be removed.
+ */
+ if (rule) {
+ if (rc < 0)
+ rule->filter_id = EFX_ARFS_FILTER_ID_ERROR;
+ else
+ rule->filter_id = rc;
+ arfs_id = rule->arfs_id;
+ }
+ spin_unlock_bh(&efx->rps_hash_lock);
+ }
if (rc >= 0) {
/* Remember this so we can check whether to expire the filter
* later.
@@ -863,23 +868,23 @@ static void efx_filter_rfs_work(struct work_struct *data)
if (req->spec.ether_type == htons(ETH_P_IP))
netif_info(efx, rx_status, efx->net_dev,
- "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
+ "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d id %u]\n",
(req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
req->spec.rem_host, ntohs(req->spec.rem_port),
req->spec.loc_host, ntohs(req->spec.loc_port),
- req->rxq_index, req->flow_id, rc);
+ req->rxq_index, req->flow_id, rc, arfs_id);
else
netif_info(efx, rx_status, efx->net_dev,
- "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d]\n",
+ "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d id %u]\n",
(req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
req->spec.rem_host, ntohs(req->spec.rem_port),
req->spec.loc_host, ntohs(req->spec.loc_port),
- req->rxq_index, req->flow_id, rc);
+ req->rxq_index, req->flow_id, rc, arfs_id);
}
/* Release references */
+ clear_bit(slot_idx, &efx->rps_slot_map);
dev_put(req->net_dev);
- kfree(req);
}
int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
@@ -887,23 +892,39 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
{
struct efx_nic *efx = netdev_priv(net_dev);
struct efx_async_filter_insertion *req;
+ struct efx_arfs_rule *rule;
struct flow_keys fk;
+ int slot_idx;
+ bool new;
+ int rc;
- if (flow_id == RPS_FLOW_ID_INVALID)
- return -EINVAL;
+ /* find a free slot */
+ for (slot_idx = 0; slot_idx < EFX_RPS_MAX_IN_FLIGHT; slot_idx++)
+ if (!test_and_set_bit(slot_idx, &efx->rps_slot_map))
+ break;
+ if (slot_idx >= EFX_RPS_MAX_IN_FLIGHT)
+ return -EBUSY;
- if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
- return -EPROTONOSUPPORT;
+ if (flow_id == RPS_FLOW_ID_INVALID) {
+ rc = -EINVAL;
+ goto out_clear;
+ }
- if (fk.basic.n_proto != htons(ETH_P_IP) && fk.basic.n_proto != htons(ETH_P_IPV6))
- return -EPROTONOSUPPORT;
- if (fk.control.flags & FLOW_DIS_IS_FRAGMENT)
- return -EPROTONOSUPPORT;
+ if (!skb_flow_dissect_flow_keys(skb, &fk, 0)) {
+ rc = -EPROTONOSUPPORT;
+ goto out_clear;
+ }
- req = kmalloc(sizeof(*req), GFP_ATOMIC);
- if (!req)
- return -ENOMEM;
+ if (fk.basic.n_proto != htons(ETH_P_IP) && fk.basic.n_proto != htons(ETH_P_IPV6)) {
+ rc = -EPROTONOSUPPORT;
+ goto out_clear;
+ }
+ if (fk.control.flags & FLOW_DIS_IS_FRAGMENT) {
+ rc = -EPROTONOSUPPORT;
+ goto out_clear;
+ }
+ req = efx->rps_slot + slot_idx;
efx_filter_init_rx(&req->spec, EFX_FILTER_PRI_HINT,
efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
rxq_index);
@@ -927,12 +948,45 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
req->spec.rem_port = fk.ports.src;
req->spec.loc_port = fk.ports.dst;
+ if (efx->rps_hash_table) {
+ /* Add it to ARFS hash table */
+ spin_lock(&efx->rps_hash_lock);
+ rule = efx_rps_hash_add(efx, &req->spec, &new);
+ if (!rule) {
+ rc = -ENOMEM;
+ goto out_unlock;
+ }
+ if (new)
+ rule->arfs_id = efx->rps_next_id++ % RPS_NO_FILTER;
+ rc = rule->arfs_id;
+ /* Skip if existing or pending filter already does the right thing */
+ if (!new && rule->rxq_index == rxq_index &&
+ rule->filter_id >= EFX_ARFS_FILTER_ID_PENDING)
+ goto out_unlock;
+ rule->rxq_index = rxq_index;
+ rule->filter_id = EFX_ARFS_FILTER_ID_PENDING;
+ spin_unlock(&efx->rps_hash_lock);
+ } else {
+ /* Without an ARFS hash table, we just use arfs_id 0 for all
+ * filters. This means if multiple flows hash to the same
+ * flow_id, all but the most recently touched will be eligible
+ * for expiry.
+ */
+ rc = 0;
+ }
+
+ /* Queue the request */
dev_hold(req->net_dev = net_dev);
INIT_WORK(&req->work, efx_filter_rfs_work);
req->rxq_index = rxq_index;
req->flow_id = flow_id;
schedule_work(&req->work);
- return 0;
+ return rc;
+out_unlock:
+ spin_unlock(&efx->rps_hash_lock);
+out_clear:
+ clear_bit(slot_idx, &efx->rps_slot_map);
+ return rc;
}
bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned int quota)
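
The slot scheme above bounds the number of asynchronous filter requests in flight: a word-sized bitmap, claimed with test_and_set_bit() in efx_filter_rfs() and released with clear_bit() in the work item. A minimal userspace model of the pattern, with illustrative names (claim_slot, MAX_IN_FLIGHT) standing in for the driver's:

#include <stdatomic.h>
#include <stdio.h>

#define MAX_IN_FLIGHT 8			/* stands in for EFX_RPS_MAX_IN_FLIGHT */

static atomic_ulong slot_map;		/* stands in for efx->rps_slot_map */

static int claim_slot(void)
{
	for (int i = 0; i < MAX_IN_FLIGHT; i++) {
		unsigned long bit = 1UL << i;

		/* atomic test-and-set: the old word value tells us who won */
		if (!(atomic_fetch_or(&slot_map, bit) & bit))
			return i;	/* bit was clear, slot i is ours */
	}
	return -1;			/* all busy: efx_filter_rfs() returns -EBUSY */
}

static void release_slot(int i)
{
	atomic_fetch_and(&slot_map, ~(1UL << i));
}

int main(void)
{
	for (int n = 0; n < MAX_IN_FLIGHT + 1; n++)
		printf("request %d -> slot %d\n", n, claim_slot());
	release_slot(3);
	printf("after release: slot %d\n", claim_slot());	/* 3 again */
	return 0;
}

Because request storage is now a fixed array of slots rather than a kmalloc() per request, the work item must clear its slot bit instead of freeing, which is exactly the clear_bit()/kfree() swap in efx_filter_rfs_work() above.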
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
index c7bff596c665..dedd40613090 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
@@ -347,7 +347,7 @@ enum power_event {
#define MTL_RX_OVERFLOW_INT BIT(16)
/* Default operating mode of the MAC */
-#define GMAC_CORE_INIT (GMAC_CONFIG_JD | GMAC_CONFIG_PS | GMAC_CONFIG_ACS | \
+#define GMAC_CORE_INIT (GMAC_CONFIG_JD | GMAC_CONFIG_PS | \
GMAC_CONFIG_BE | GMAC_CONFIG_DCRS)
/* To dump the core regs excluding the Address Registers */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index a3af92ebbca8..517b1f6736a8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -31,13 +31,6 @@ static void dwmac4_core_init(struct mac_device_info *hw,
value |= GMAC_CORE_INIT;
- /* Clear ACS bit because Ethernet switch tagging formats such as
- * Broadcom tags can look like invalid LLC/SNAP packets and cause the
- * hardware to truncate packets on reception.
- */
- if (netdev_uses_dsa(dev))
- value &= ~GMAC_CONFIG_ACS;
-
if (mtu > 1500)
value |= GMAC_CONFIG_2K;
if (mtu > 2000)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 9a16931ce39d..b65e2d144698 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -3495,8 +3495,13 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
* Type frames (LLC/LLC-SNAP)
+ *
+ * llc_snap is never checked in GMAC >= 4, so this ACS
+ * feature is always disabled and packets need to be
+ * stripped manually.
*/
- if (unlikely(status != llc_snap))
+ if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
+ unlikely(status != llc_snap))
frame_len -= ETH_FCS_LEN;
if (netif_msg_rx_status(priv)) {
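
On dwmac4 and later the llc_snap status is never reported, so the hardware ACS strip can never be relied on; the hunk above makes the driver drop the 4-byte FCS itself whenever the core is GMAC >= 4. A compilable sketch of that decision (the DWMAC_CORE_4_00 value here is an assumption for illustration):

#include <stdbool.h>
#include <stdio.h>

#define ETH_FCS_LEN	4
#define DWMAC_CORE_4_00	0x40	/* assumed value, for illustration */

static unsigned int rx_frame_len(unsigned int hw_len, unsigned int synopsys_id,
				 bool status_is_llc_snap)
{
	/* GMAC >= 4: always strip in software; older cores: only when
	 * the hardware did not already strip (status != llc_snap) */
	if (synopsys_id >= DWMAC_CORE_4_00 || !status_is_llc_snap)
		hw_len -= ETH_FCS_LEN;
	return hw_len;
}

int main(void)
{
	printf("%u\n", rx_frame_len(64, 0x41, true));	/* GMAC4: 60 */
	printf("%u\n", rx_frame_len(64, 0x37, true));	/* older core: 64 */
	return 0;
}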
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 30371274409d..74f828412055 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -129,7 +129,7 @@ do { \
#define RX_PRIORITY_MAPPING 0x76543210
#define TX_PRIORITY_MAPPING 0x33221100
-#define CPDMA_TX_PRIORITY_MAP 0x01234567
+#define CPDMA_TX_PRIORITY_MAP 0x76543210
#define CPSW_VLAN_AWARE BIT(1)
#define CPSW_RX_VLAN_ENCAP BIT(2)
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 9cbb0c8a896a..7de88b33d5b9 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -3277,7 +3277,7 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
err = netdev_upper_dev_link(real_dev, dev, extack);
if (err < 0)
- goto put_dev;
+ goto unregister;
/* need to be already registered so that ->init has run and
* the MAC addr is set
@@ -3316,8 +3316,7 @@ del_dev:
macsec_del_dev(macsec);
unlink:
netdev_upper_dev_unlink(real_dev, dev);
-put_dev:
- dev_put(real_dev);
+unregister:
unregister_netdevice(dev);
return err;
}
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index c22e8e383247..25e2a099b71c 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -1393,6 +1393,15 @@ static int m88e1318_set_wol(struct phy_device *phydev,
if (err < 0)
goto error;
+ /* If a WOL event happened once, the LED[2] interrupt pin
+ * will not be cleared unless we read the interrupt status
+ * register. If interrupts are in use, the normal interrupt
+ * handling will clear the WOL event. Clear the WOL event
+ * before enabling it if !phy_interrupt_is_valid().
+ */
+ if (!phy_interrupt_is_valid(phydev))
+ phy_read(phydev, MII_M1011_IEVENT);
+
/* Enable the WOL interrupt */
err = __phy_modify(phydev, MII_88E1318S_PHY_CSIER, 0,
MII_88E1318S_PHY_CSIER_WOL_EIE);
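
The register being read here is read-to-clear: a latched WOL event keeps the LED[2]/INTn pin asserted until someone reads MII_M1011_IEVENT, and without an interrupt handler nobody ever does. A small model of the pattern (mock types, not the phylib API):

#include <stdbool.h>
#include <stdio.h>

struct phy_model {
	bool irq_in_use;		/* phy_interrupt_is_valid() analogue */
	unsigned int latched_events;	/* read-to-clear status register */
};

static unsigned int read_ievent(struct phy_model *phy)
{
	unsigned int v = phy->latched_events;

	phy->latched_events = 0;	/* the read clears the latch */
	return v;
}

static void enable_wol(struct phy_model *phy)
{
	/* no IRQ handler means nobody else will read (and so clear) the
	 * latch, so do it before arming WOL or the pin stays stuck */
	if (!phy->irq_in_use)
		read_ievent(phy);
	/* ... set the WOL enable bit here ... */
}

int main(void)
{
	struct phy_model phy = { .irq_in_use = false, .latched_events = 0x80 };

	enable_wol(&phy);
	printf("latch after enable: %#x\n", phy.latched_events);	/* 0 */
	return 0;
}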
diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c
index 0f293ef28935..a97ac8c12c4c 100644
--- a/drivers/net/phy/microchip.c
+++ b/drivers/net/phy/microchip.c
@@ -20,6 +20,7 @@
#include <linux/ethtool.h>
#include <linux/phy.h>
#include <linux/microchipphy.h>
+#include <linux/delay.h>
#define DRIVER_AUTHOR "WOOJUNG HUH <woojung.huh@microchip.com>"
#define DRIVER_DESC "Microchip LAN88XX PHY driver"
@@ -30,6 +31,16 @@ struct lan88xx_priv {
__u32 wolopts;
};
+static int lan88xx_read_page(struct phy_device *phydev)
+{
+ return __phy_read(phydev, LAN88XX_EXT_PAGE_ACCESS);
+}
+
+static int lan88xx_write_page(struct phy_device *phydev, int page)
+{
+ return __phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, page);
+}
+
static int lan88xx_phy_config_intr(struct phy_device *phydev)
{
int rc;
@@ -66,6 +77,150 @@ static int lan88xx_suspend(struct phy_device *phydev)
return 0;
}
+static int lan88xx_TR_reg_set(struct phy_device *phydev, u16 regaddr,
+ u32 data)
+{
+ int val, save_page, ret = 0;
+ u16 buf;
+
+ /* Save current page */
+ save_page = phy_save_page(phydev);
+ if (save_page < 0) {
+ pr_warn("Failed to get current page\n");
+ goto err;
+ }
+
+ /* Switch to TR page */
+ lan88xx_write_page(phydev, LAN88XX_EXT_PAGE_ACCESS_TR);
+
+ ret = __phy_write(phydev, LAN88XX_EXT_PAGE_TR_LOW_DATA,
+ (data & 0xFFFF));
+ if (ret < 0) {
+ pr_warn("Failed to write TR low data\n");
+ goto err;
+ }
+
+ ret = __phy_write(phydev, LAN88XX_EXT_PAGE_TR_HIGH_DATA,
+ (data & 0x00FF0000) >> 16);
+ if (ret < 0) {
+ pr_warn("Failed to write TR high data\n");
+ goto err;
+ }
+
+ /* Config control bits [15:13] of register */
+ buf = (regaddr & ~(0x3 << 13)); /* clear [14:13] to write data in reg */
+ buf |= 0x8000; /* Set [15] to Packet transmit */
+
+ ret = __phy_write(phydev, LAN88XX_EXT_PAGE_TR_CR, buf);
+ if (ret < 0) {
+ pr_warn("Failed to write data in reg\n");
+ goto err;
+ }
+
+ usleep_range(1000, 2000); /* wait for the data to be written */
+ val = __phy_read(phydev, LAN88XX_EXT_PAGE_TR_CR);
+ if (!(val & 0x8000))
+ pr_warn("TR Register[0x%X] configuration failed\n", regaddr);
+err:
+ return phy_restore_page(phydev, save_page, ret);
+}
+
+static void lan88xx_config_TR_regs(struct phy_device *phydev)
+{
+ int err;
+
+ /* Get access to Channel 0x1, Node 0xF, Register 0x01.
+ * Write 24-bit value 0x12B00A to register. Setting MrvlTrFix1000Kf,
+ * MrvlTrFix1000Kp, MasterEnableTR bits.
+ */
+ err = lan88xx_TR_reg_set(phydev, 0x0F82, 0x12B00A);
+ if (err < 0)
+ pr_warn("Failed to Set Register[0x0F82]\n");
+
+ /* Get access to Channel b'10, Node b'1101, Register 0x06.
+ * Write 24-bit value 0xD2C46F to register. Setting SSTrKf1000Slv,
+ * SSTrKp1000Mas bits.
+ */
+ err = lan88xx_TR_reg_set(phydev, 0x168C, 0xD2C46F);
+ if (err < 0)
+ pr_warn("Failed to Set Register[0x168C]\n");
+
+ /* Get access to Channel b'10, Node b'1111, Register 0x11.
+ * Write 24-bit value 0x620 to register. Setting rem_upd_done_thresh
+ * bits.
+ */
+ err = lan88xx_TR_reg_set(phydev, 0x17A2, 0x620);
+ if (err < 0)
+ pr_warn("Failed to Set Register[0x17A2]\n");
+
+ /* Get access to Channel b'10, Node b'1101, Register 0x10.
+ * Write 24-bit value 0xEEFFDD to register. Setting
+ * eee_TrKp1Long_1000, eee_TrKp2Long_1000, eee_TrKp3Long_1000,
+ * eee_TrKp1Short_1000,eee_TrKp2Short_1000, eee_TrKp3Short_1000 bits.
+ */
+ err = lan88xx_TR_reg_set(phydev, 0x16A0, 0xEEFFDD);
+ if (err < 0)
+ pr_warn("Failed to Set Register[0x16A0]\n");
+
+ /* Get access to Channel b'10, Node b'1101, Register 0x13.
+ * Write 24-bit value 0x071448 to register. Setting
+ * slv_lpi_tr_tmr_val1, slv_lpi_tr_tmr_val2 bits.
+ */
+ err = lan88xx_TR_reg_set(phydev, 0x16A6, 0x071448);
+ if (err < 0)
+ pr_warn("Failed to Set Register[0x16A6]\n");
+
+ /* Get access to Channel b'10, Node b'1101, Register 0x12.
+ * Write 24-bit value 0x13132F to register. Setting
+ * slv_sigdet_timer_val1, slv_sigdet_timer_val2 bits.
+ */
+ err = lan88xx_TR_reg_set(phydev, 0x16A4, 0x13132F);
+ if (err < 0)
+ pr_warn("Failed to Set Register[0x16A4]\n");
+
+ /* Get access to Channel b'10, Node b'1101, Register 0x14.
+ * Write 24-bit value 0x0 to register. Setting eee_3level_delay,
+ * eee_TrKf_freeze_delay bits.
+ */
+ err = lan88xx_TR_reg_set(phydev, 0x16A8, 0x0);
+ if (err < 0)
+ pr_warn("Failed to Set Register[0x16A8]\n");
+
+ /* Get access to Channel b'01, Node b'1111, Register 0x34.
+ * Write 24-bit value 0x91B06C to register. Setting
+ * FastMseSearchThreshLong1000, FastMseSearchThreshShort1000,
+ * FastMseSearchUpdGain1000 bits.
+ */
+ err = lan88xx_TR_reg_set(phydev, 0x0FE8, 0x91B06C);
+ if (err < 0)
+ pr_warn("Failed to Set Register[0x0FE8]\n");
+
+ /* Get access to Channel b'01, Node b'1111, Register 0x3E.
+ * Write 24-bit value 0xC0A028 to register. Setting
+ * FastMseKp2ThreshLong1000, FastMseKp2ThreshShort1000,
+ * FastMseKp2UpdGain1000, FastMseKp2ExitEn1000 bits.
+ */
+ err = lan88xx_TR_reg_set(phydev, 0x0FFC, 0xC0A028);
+ if (err < 0)
+ pr_warn("Failed to Set Register[0x0FFC]\n");
+
+ /* Get access to Channel b'01, Node b'1111, Register 0x35.
+ * Write 24-bit value 0x041600 to register. Setting
+ * FastMseSearchPhShNum1000, FastMseSearchClksPerPh1000,
+ * FastMsePhChangeDelay1000 bits.
+ */
+ err = lan88xx_TR_reg_set(phydev, 0x0FEA, 0x041600);
+ if (err < 0)
+ pr_warn("Failed to Set Register[0x0FEA]\n");
+
+ /* Get access to Channel b'10, Node b'1101, Register 0x03.
+ * Write 24-bit value 0x000004 to register. Setting TrFreeze bits.
+ */
+ err = lan88xx_TR_reg_set(phydev, 0x1686, 0x000004);
+ if (err < 0)
+ pr_warn("Failed to Set Register[0x1686]\n");
+}
+
static int lan88xx_probe(struct phy_device *phydev)
{
struct device *dev = &phydev->mdio.dev;
@@ -132,6 +287,25 @@ static void lan88xx_set_mdix(struct phy_device *phydev)
phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, LAN88XX_EXT_PAGE_SPACE_0);
}
+static int lan88xx_config_init(struct phy_device *phydev)
+{
+ int val;
+
+ genphy_config_init(phydev);
+ /* Zero-detect delay enable */
+ val = phy_read_mmd(phydev, MDIO_MMD_PCS,
+ PHY_ARDENNES_MMD_DEV_3_PHY_CFG);
+ val |= PHY_ARDENNES_MMD_DEV_3_PHY_CFG_ZD_DLY_EN_;
+
+ phy_write_mmd(phydev, MDIO_MMD_PCS, PHY_ARDENNES_MMD_DEV_3_PHY_CFG,
+ val);
+
+ /* Config DSP registers */
+ lan88xx_config_TR_regs(phydev);
+
+ return 0;
+}
+
static int lan88xx_config_aneg(struct phy_device *phydev)
{
lan88xx_set_mdix(phydev);
@@ -151,7 +325,7 @@ static struct phy_driver microchip_phy_driver[] = {
.probe = lan88xx_probe,
.remove = lan88xx_remove,
- .config_init = genphy_config_init,
+ .config_init = lan88xx_config_init,
.config_aneg = lan88xx_config_aneg,
.ack_interrupt = lan88xx_phy_ack_interrupt,
@@ -160,6 +334,8 @@ static struct phy_driver microchip_phy_driver[] = {
.suspend = lan88xx_suspend,
.resume = genphy_resume,
.set_wol = lan88xx_set_wol,
+ .read_page = lan88xx_read_page,
+ .write_page = lan88xx_write_page,
} };
module_phy_driver(microchip_phy_driver);
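
lan88xx_TR_reg_set() relies on the phy_save_page()/phy_restore_page() bracket: save the current page (which also takes the MDIO bus lock in phylib), switch, do the paged I/O, and restore on every exit path, errors included. A userspace model of just the bracket (function names illustrative, lock elided):

#include <stdio.h>

static int current_page;

static int save_page(void)		{ return current_page; }
static void set_page(int page)		{ current_page = page; }

static int restore_page(int saved, int ret)
{
	set_page(saved);	/* always undo the switch */
	return ret;		/* propagate the first error, like phylib */
}

static int paged_write(int page, int reg, int val)
{
	int saved = save_page();
	int ret = 0;

	set_page(page);		/* e.g. the TR page */
	printf("reg %d = %#x on page %d\n", reg, val, current_page);
	/* a real bus write could fail here and set ret < 0 */
	return restore_page(saved, ret);
}

int main(void)
{
	paged_write(5 /* hypothetical page number */, 16, 0x12B);
	printf("restored to page %d\n", current_page);	/* 0 */
	return 0;
}

Registering .read_page/.write_page (the last hunk of this file) is what lets the generic phy_save_page() machinery work for this PHY.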
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index 1483bc7b01e1..7df07337d69c 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -620,6 +620,10 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
lock_sock(sk);
error = -EINVAL;
+
+ if (sockaddr_len != sizeof(struct sockaddr_pppox))
+ goto end;
+
if (sp->sa_protocol != PX_PROTO_OE)
goto end;
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index a6c6ce19eeee..ddb6bf85a59c 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -261,6 +261,17 @@ static void __team_option_inst_mark_removed_port(struct team *team,
}
}
+static bool __team_option_inst_tmp_find(const struct list_head *opts,
+ const struct team_option_inst *needle)
+{
+ struct team_option_inst *opt_inst;
+
+ list_for_each_entry(opt_inst, opts, tmp_list)
+ if (opt_inst == needle)
+ return true;
+ return false;
+}
+
static int __team_options_register(struct team *team,
const struct team_option *option,
size_t option_count)
@@ -1061,14 +1072,11 @@ static void team_port_leave(struct team *team, struct team_port *port)
}
#ifdef CONFIG_NET_POLL_CONTROLLER
-static int team_port_enable_netpoll(struct team *team, struct team_port *port)
+static int __team_port_enable_netpoll(struct team_port *port)
{
struct netpoll *np;
int err;
- if (!team->dev->npinfo)
- return 0;
-
np = kzalloc(sizeof(*np), GFP_KERNEL);
if (!np)
return -ENOMEM;
@@ -1082,6 +1090,14 @@ static int team_port_enable_netpoll(struct team *team, struct team_port *port)
return err;
}
+static int team_port_enable_netpoll(struct team_port *port)
+{
+ if (!port->team->dev->npinfo)
+ return 0;
+
+ return __team_port_enable_netpoll(port);
+}
+
static void team_port_disable_netpoll(struct team_port *port)
{
struct netpoll *np = port->np;
@@ -1096,7 +1112,7 @@ static void team_port_disable_netpoll(struct team_port *port)
kfree(np);
}
#else
-static int team_port_enable_netpoll(struct team *team, struct team_port *port)
+static int team_port_enable_netpoll(struct team_port *port)
{
return 0;
}
@@ -1210,7 +1226,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
goto err_vids_add;
}
- err = team_port_enable_netpoll(team, port);
+ err = team_port_enable_netpoll(port);
if (err) {
netdev_err(dev, "Failed to enable netpoll on device %s\n",
portname);
@@ -1907,7 +1923,7 @@ static int team_netpoll_setup(struct net_device *dev,
mutex_lock(&team->lock);
list_for_each_entry(port, &team->port_list, list) {
- err = team_port_enable_netpoll(team, port);
+ err = __team_port_enable_netpoll(port);
if (err) {
__team_netpoll_cleanup(team);
break;
@@ -2568,6 +2584,14 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
if (err)
goto team_put;
opt_inst->changed = true;
+
+ /* dumb/evil user-space can send us duplicate opt,
+ * keep only the last one
+ */
+ if (__team_option_inst_tmp_find(&opt_inst_list,
+ opt_inst))
+ continue;
+
list_add(&opt_inst->tmp_list, &opt_inst_list);
}
if (!opt_found) {
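
__team_option_inst_tmp_find() is nothing more than a membership test on the temporary list, so a duplicated option from user space is applied but queued for notification only once. The idea in miniature, with an array standing in for the kernel list:

#include <stdbool.h>
#include <stdio.h>

static const void *seen[16];
static int nseen;

static bool already_listed(const void *needle)
{
	for (int i = 0; i < nseen; i++)
		if (seen[i] == needle)
			return true;	/* duplicate opt_inst */
	return false;
}

int main(void)
{
	int a, b;
	const void *updates[] = { &a, &b, &a };	/* &a sent twice */

	for (int i = 0; i < 3; i++) {
		if (already_listed(updates[i]))
			continue;	/* don't re-add the same node */
		seen[nseen++] = updates[i];
	}
	printf("queued notifications: %d\n", nseen);	/* 2 */
	return 0;
}

Without the check, list_add() on an opt_inst that is already on the list corrupts it; that is the bug this hunk closes.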
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 28583aa0c17d..ef33950a45d9 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1102,12 +1102,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
goto drop;
len = run_ebpf_filter(tun, skb, len);
-
- /* Trim extra bytes since we may insert vlan proto & TCI
- * in tun_put_user().
- */
- len -= skb_vlan_tag_present(skb) ? sizeof(struct veth) : 0;
- if (len <= 0 || pskb_trim(skb, len))
+ if (len == 0 || pskb_trim(skb, len))
goto drop;
if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index ca066b785e9f..c853e7410f5a 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1107,6 +1107,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x1435, 0xd181, 3)}, /* Wistron NeWeb D18Q1 */
{QMI_FIXED_INTF(0x1435, 0xd181, 4)}, /* Wistron NeWeb D18Q1 */
{QMI_FIXED_INTF(0x1435, 0xd181, 5)}, /* Wistron NeWeb D18Q1 */
+ {QMI_FIXED_INTF(0x1435, 0xd191, 4)}, /* Wistron NeWeb D19Q1 */
{QMI_FIXED_INTF(0x16d8, 0x6003, 0)}, /* CMOTech 6003 */
{QMI_FIXED_INTF(0x16d8, 0x6007, 0)}, /* CMOTech CHE-628S */
{QMI_FIXED_INTF(0x16d8, 0x6008, 0)}, /* CMOTech CMU-301 */
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 7b187ec7411e..770422e953f7 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -147,6 +147,17 @@ struct receive_queue {
struct xdp_rxq_info xdp_rxq;
};
+/* Control VQ buffers: protected by the rtnl lock */
+struct control_buf {
+ struct virtio_net_ctrl_hdr hdr;
+ virtio_net_ctrl_ack status;
+ struct virtio_net_ctrl_mq mq;
+ u8 promisc;
+ u8 allmulti;
+ __virtio16 vid;
+ __virtio64 offloads;
+};
+
struct virtnet_info {
struct virtio_device *vdev;
struct virtqueue *cvq;
@@ -192,14 +203,7 @@ struct virtnet_info {
struct hlist_node node;
struct hlist_node node_dead;
- /* Control VQ buffers: protected by the rtnl lock */
- struct virtio_net_ctrl_hdr ctrl_hdr;
- virtio_net_ctrl_ack ctrl_status;
- struct virtio_net_ctrl_mq ctrl_mq;
- u8 ctrl_promisc;
- u8 ctrl_allmulti;
- u16 ctrl_vid;
- u64 ctrl_offloads;
+ struct control_buf *ctrl;
/* Ethtool settings */
u8 duplex;
@@ -1269,7 +1273,9 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
{
struct receive_queue *rq =
container_of(napi, struct receive_queue, napi);
- unsigned int received;
+ struct virtnet_info *vi = rq->vq->vdev->priv;
+ struct send_queue *sq;
+ unsigned int received, qp;
bool xdp_xmit = false;
virtnet_poll_cleantx(rq);
@@ -1280,8 +1286,13 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
if (received < budget)
virtqueue_napi_complete(napi, rq->vq, received);
- if (xdp_xmit)
+ if (xdp_xmit) {
+ qp = vi->curr_queue_pairs - vi->xdp_queue_pairs +
+ smp_processor_id();
+ sq = &vi->sq[qp];
+ virtqueue_kick(sq->vq);
xdp_do_flush_map();
+ }
return received;
}
@@ -1454,25 +1465,25 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
/* Caller should know better */
BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));
- vi->ctrl_status = ~0;
- vi->ctrl_hdr.class = class;
- vi->ctrl_hdr.cmd = cmd;
+ vi->ctrl->status = ~0;
+ vi->ctrl->hdr.class = class;
+ vi->ctrl->hdr.cmd = cmd;
/* Add header */
- sg_init_one(&hdr, &vi->ctrl_hdr, sizeof(vi->ctrl_hdr));
+ sg_init_one(&hdr, &vi->ctrl->hdr, sizeof(vi->ctrl->hdr));
sgs[out_num++] = &hdr;
if (out)
sgs[out_num++] = out;
/* Add return status. */
- sg_init_one(&stat, &vi->ctrl_status, sizeof(vi->ctrl_status));
+ sg_init_one(&stat, &vi->ctrl->status, sizeof(vi->ctrl->status));
sgs[out_num] = &stat;
BUG_ON(out_num + 1 > ARRAY_SIZE(sgs));
virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC);
if (unlikely(!virtqueue_kick(vi->cvq)))
- return vi->ctrl_status == VIRTIO_NET_OK;
+ return vi->ctrl->status == VIRTIO_NET_OK;
/* Spin for a response, the kick causes an ioport write, trapping
* into the hypervisor, so the request should be handled immediately.
@@ -1481,7 +1492,7 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
!virtqueue_is_broken(vi->cvq))
cpu_relax();
- return vi->ctrl_status == VIRTIO_NET_OK;
+ return vi->ctrl->status == VIRTIO_NET_OK;
}
static int virtnet_set_mac_address(struct net_device *dev, void *p)
@@ -1593,8 +1604,8 @@ static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
return 0;
- vi->ctrl_mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
- sg_init_one(&sg, &vi->ctrl_mq, sizeof(vi->ctrl_mq));
+ vi->ctrl->mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
+ sg_init_one(&sg, &vi->ctrl->mq, sizeof(vi->ctrl->mq));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) {
@@ -1653,22 +1664,22 @@ static void virtnet_set_rx_mode(struct net_device *dev)
if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
return;
- vi->ctrl_promisc = ((dev->flags & IFF_PROMISC) != 0);
- vi->ctrl_allmulti = ((dev->flags & IFF_ALLMULTI) != 0);
+ vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0);
+ vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0);
- sg_init_one(sg, &vi->ctrl_promisc, sizeof(vi->ctrl_promisc));
+ sg_init_one(sg, &vi->ctrl->promisc, sizeof(vi->ctrl->promisc));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
VIRTIO_NET_CTRL_RX_PROMISC, sg))
dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
- vi->ctrl_promisc ? "en" : "dis");
+ vi->ctrl->promisc ? "en" : "dis");
- sg_init_one(sg, &vi->ctrl_allmulti, sizeof(vi->ctrl_allmulti));
+ sg_init_one(sg, &vi->ctrl->allmulti, sizeof(vi->ctrl->allmulti));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
VIRTIO_NET_CTRL_RX_ALLMULTI, sg))
dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
- vi->ctrl_allmulti ? "en" : "dis");
+ vi->ctrl->allmulti ? "en" : "dis");
uc_count = netdev_uc_count(dev);
mc_count = netdev_mc_count(dev);
@@ -1714,8 +1725,8 @@ static int virtnet_vlan_rx_add_vid(struct net_device *dev,
struct virtnet_info *vi = netdev_priv(dev);
struct scatterlist sg;
- vi->ctrl_vid = vid;
- sg_init_one(&sg, &vi->ctrl_vid, sizeof(vi->ctrl_vid));
+ vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
+ sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
VIRTIO_NET_CTRL_VLAN_ADD, &sg))
@@ -1729,8 +1740,8 @@ static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
struct virtnet_info *vi = netdev_priv(dev);
struct scatterlist sg;
- vi->ctrl_vid = vid;
- sg_init_one(&sg, &vi->ctrl_vid, sizeof(vi->ctrl_vid));
+ vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
+ sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
VIRTIO_NET_CTRL_VLAN_DEL, &sg))
@@ -2126,9 +2137,9 @@ static int virtnet_restore_up(struct virtio_device *vdev)
static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads)
{
struct scatterlist sg;
- vi->ctrl_offloads = cpu_to_virtio64(vi->vdev, offloads);
+ vi->ctrl->offloads = cpu_to_virtio64(vi->vdev, offloads);
- sg_init_one(&sg, &vi->ctrl_offloads, sizeof(vi->ctrl_offloads));
+ sg_init_one(&sg, &vi->ctrl->offloads, sizeof(vi->ctrl->offloads));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) {
@@ -2351,6 +2362,7 @@ static void virtnet_free_queues(struct virtnet_info *vi)
kfree(vi->rq);
kfree(vi->sq);
+ kfree(vi->ctrl);
}
static void _free_receive_bufs(struct virtnet_info *vi)
@@ -2543,6 +2555,9 @@ static int virtnet_alloc_queues(struct virtnet_info *vi)
{
int i;
+ vi->ctrl = kzalloc(sizeof(*vi->ctrl), GFP_KERNEL);
+ if (!vi->ctrl)
+ goto err_ctrl;
vi->sq = kzalloc(sizeof(*vi->sq) * vi->max_queue_pairs, GFP_KERNEL);
if (!vi->sq)
goto err_sq;
@@ -2571,6 +2586,8 @@ static int virtnet_alloc_queues(struct virtnet_info *vi)
err_rq:
kfree(vi->sq);
err_sq:
+ kfree(vi->ctrl);
+err_ctrl:
return -ENOMEM;
}
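
Moving the control buffers out of struct virtnet_info into their own kzalloc() gives the ctrl virtqueue scatterlists a standalone allocation rather than pointers into the netdev private area, which (as I read the change) matters once those buffers are handed to the DMA machinery. The alloc/unwind ladder that virtnet_alloc_queues() grows is the standard goto pattern; in miniature:

#include <stdlib.h>

struct ctl { int hdr, status; };	/* stands in for struct control_buf */

static struct ctl *ctrl;
static int *sq, *rq;

static int alloc_queues(int n)
{
	ctrl = calloc(1, sizeof(*ctrl));
	if (!ctrl)
		goto err_ctrl;
	sq = calloc(n, sizeof(*sq));
	if (!sq)
		goto err_sq;
	rq = calloc(n, sizeof(*rq));
	if (!rq)
		goto err_rq;
	return 0;

err_rq:				/* free in reverse order of allocation */
	free(sq);
err_sq:
	free(ctrl);
err_ctrl:
	return -1;		/* -ENOMEM in the driver */
}

int main(void) { return alloc_queues(4) ? 1 : 0; }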
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index e04937f44f33..9ebe2a689966 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1218,6 +1218,7 @@ vmxnet3_get_hdr_len(struct vmxnet3_adapter *adapter, struct sk_buff *skb,
union {
void *ptr;
struct ethhdr *eth;
+ struct vlan_ethhdr *veth;
struct iphdr *ipv4;
struct ipv6hdr *ipv6;
struct tcphdr *tcp;
@@ -1228,16 +1229,24 @@ vmxnet3_get_hdr_len(struct vmxnet3_adapter *adapter, struct sk_buff *skb,
if (unlikely(sizeof(struct iphdr) + sizeof(struct tcphdr) > maplen))
return 0;
+ if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
+ skb->protocol == cpu_to_be16(ETH_P_8021AD))
+ hlen = sizeof(struct vlan_ethhdr);
+ else
+ hlen = sizeof(struct ethhdr);
+
hdr.eth = eth_hdr(skb);
if (gdesc->rcd.v4) {
- BUG_ON(hdr.eth->h_proto != htons(ETH_P_IP));
- hdr.ptr += sizeof(struct ethhdr);
+ BUG_ON(hdr.eth->h_proto != htons(ETH_P_IP) &&
+ hdr.veth->h_vlan_encapsulated_proto != htons(ETH_P_IP));
+ hdr.ptr += hlen;
BUG_ON(hdr.ipv4->protocol != IPPROTO_TCP);
hlen = hdr.ipv4->ihl << 2;
hdr.ptr += hdr.ipv4->ihl << 2;
} else if (gdesc->rcd.v6) {
- BUG_ON(hdr.eth->h_proto != htons(ETH_P_IPV6));
- hdr.ptr += sizeof(struct ethhdr);
+ BUG_ON(hdr.eth->h_proto != htons(ETH_P_IPV6) &&
+ hdr.veth->h_vlan_encapsulated_proto != htons(ETH_P_IPV6));
+ hdr.ptr += hlen;
/* Use an estimated value, since we also need to handle
* TSO case.
*/
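
The header-length fix boils down to: a VLAN-tagged frame carries four extra bytes before the IP header, so hdr.ptr must advance by sizeof(struct vlan_ethhdr) instead of sizeof(struct ethhdr). A compilable sketch of the selection:

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

#define ETH_P_8021Q	0x8100
#define ETH_P_8021AD	0x88A8
#define ETH_HLEN	14	/* sizeof(struct ethhdr) */
#define VLAN_ETH_HLEN	18	/* sizeof(struct vlan_ethhdr) */

static unsigned int l2_hlen(uint16_t proto_be)
{
	uint16_t proto = ntohs(proto_be);

	if (proto == ETH_P_8021Q || proto == ETH_P_8021AD)
		return VLAN_ETH_HLEN;
	return ETH_HLEN;
}

int main(void)
{
	printf("plain: %u, tagged: %u\n",
	       l2_hlen(htons(0x0800)), l2_hlen(htons(ETH_P_8021Q)));
	return 0;
}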
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 59ec34052a65..a3326463b71f 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -69,10 +69,10 @@
/*
* Version numbers
*/
-#define VMXNET3_DRIVER_VERSION_STRING "1.4.13.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING "1.4.14.0-k"
/* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */
-#define VMXNET3_DRIVER_VERSION_NUM 0x01040d00
+#define VMXNET3_DRIVER_VERSION_NUM 0x01040e00
#if defined(CONFIG_PCI_MSI)
/* RSS only makes sense if MSI-X is supported. */
diff --git a/drivers/nvdimm/Kconfig b/drivers/nvdimm/Kconfig
index 85997184e047..9d36473dc2a2 100644
--- a/drivers/nvdimm/Kconfig
+++ b/drivers/nvdimm/Kconfig
@@ -103,8 +103,7 @@ config NVDIMM_DAX
Select Y if unsure
config OF_PMEM
- # FIXME: make tristate once OF_NUMA dependency removed
- bool "Device-tree support for persistent memory regions"
+ tristate "Device-tree support for persistent memory regions"
depends on OF
default LIBNVDIMM
help
diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c
index e00d45522b80..8d348b22ba45 100644
--- a/drivers/nvdimm/dimm_devs.c
+++ b/drivers/nvdimm/dimm_devs.c
@@ -88,9 +88,9 @@ int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd)
int nvdimm_init_config_data(struct nvdimm_drvdata *ndd)
{
struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
+ int rc = validate_dimm(ndd), cmd_rc = 0;
struct nd_cmd_get_config_data_hdr *cmd;
struct nvdimm_bus_descriptor *nd_desc;
- int rc = validate_dimm(ndd);
u32 max_cmd_size, config_size;
size_t offset;
@@ -124,9 +124,11 @@ int nvdimm_init_config_data(struct nvdimm_drvdata *ndd)
cmd->in_offset = offset;
rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
ND_CMD_GET_CONFIG_DATA, cmd,
- cmd->in_length + sizeof(*cmd), NULL);
- if (rc || cmd->status) {
- rc = -ENXIO;
+ cmd->in_length + sizeof(*cmd), &cmd_rc);
+ if (rc < 0)
+ break;
+ if (cmd_rc < 0) {
+ rc = cmd_rc;
break;
}
memcpy(ndd->data + offset, cmd->out_buf, cmd->in_length);
@@ -140,9 +142,9 @@ int nvdimm_init_config_data(struct nvdimm_drvdata *ndd)
int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
void *buf, size_t len)
{
- int rc = validate_dimm(ndd);
size_t max_cmd_size, buf_offset;
struct nd_cmd_set_config_hdr *cmd;
+ int rc = validate_dimm(ndd), cmd_rc = 0;
struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
@@ -164,7 +166,6 @@ int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
for (buf_offset = 0; len; len -= cmd->in_length,
buf_offset += cmd->in_length) {
size_t cmd_size;
- u32 *status;
cmd->in_offset = offset + buf_offset;
cmd->in_length = min(max_cmd_size, len);
@@ -172,12 +173,13 @@ int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
/* status is output in the last 4 bytes of the command buffer */
cmd_size = sizeof(*cmd) + cmd->in_length + sizeof(u32);
- status = ((void *) cmd) + cmd_size - sizeof(u32);
rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
- ND_CMD_SET_CONFIG_DATA, cmd, cmd_size, NULL);
- if (rc || *status) {
- rc = rc ? rc : -ENXIO;
+ ND_CMD_SET_CONFIG_DATA, cmd, cmd_size, &cmd_rc);
+ if (rc < 0)
+ break;
+ if (cmd_rc < 0) {
+ rc = cmd_rc;
break;
}
}
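
The rc/cmd_rc split in both hunks follows the same rule: rc < 0 is a transport failure and is returned as-is, while rc == 0 with cmd_rc < 0 is a firmware failure and cmd_rc becomes the return value, replacing the old blanket -ENXIO. The rule in isolation:

#include <stdio.h>

/* mock of the ->ndctl() call: transport result via the return value,
 * firmware result via *cmd_rc */
static int ndctl_mock(int transport_err, int fw_err, int *cmd_rc)
{
	if (transport_err)
		return transport_err;	/* *cmd_rc left untouched */
	*cmd_rc = fw_err;
	return 0;
}

static int do_config_io(int transport_err, int fw_err)
{
	int cmd_rc = 0;
	int rc = ndctl_mock(transport_err, fw_err, &cmd_rc);

	if (rc < 0)
		return rc;	/* bus-level failure */
	if (cmd_rc < 0)
		return cmd_rc;	/* command-level failure */
	return 0;
}

int main(void)
{
	printf("%d %d %d\n", do_config_io(0, 0),
	       do_config_io(-5, 0), do_config_io(0, -22));	/* 0 -5 -22 */
	return 0;
}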
diff --git a/drivers/nvdimm/of_pmem.c b/drivers/nvdimm/of_pmem.c
index 85013bad35de..0a701837dfc0 100644
--- a/drivers/nvdimm/of_pmem.c
+++ b/drivers/nvdimm/of_pmem.c
@@ -67,7 +67,7 @@ static int of_pmem_region_probe(struct platform_device *pdev)
*/
memset(&ndr_desc, 0, sizeof(ndr_desc));
ndr_desc.attr_groups = region_attr_groups;
- ndr_desc.numa_node = of_node_to_nid(np);
+ ndr_desc.numa_node = dev_to_node(&pdev->dev);
ndr_desc.res = &pdev->resource[i];
ndr_desc.of_node = np;
set_bit(ND_REGION_PAGEMAP, &ndr_desc.flags);
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 6ace47099fc5..b9a131137e64 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -958,10 +958,11 @@ static int pci_pm_freeze(struct device *dev)
* devices should not be touched during freeze/thaw transitions,
* however.
*/
- if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND))
+ if (!dev_pm_smart_suspend_and_suspended(dev)) {
pm_runtime_resume(dev);
+ pci_dev->state_saved = false;
+ }
- pci_dev->state_saved = false;
if (pm->freeze) {
int error;
diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c
index 9d27016c899e..0434ab7b6497 100644
--- a/drivers/rapidio/devices/rio_mport_cdev.c
+++ b/drivers/rapidio/devices/rio_mport_cdev.c
@@ -740,10 +740,7 @@ static int do_dma_request(struct mport_dma_req *req,
tx->callback = dma_xfer_callback;
tx->callback_param = req;
- req->dmach = chan;
- req->sync = sync;
req->status = DMA_IN_PROGRESS;
- init_completion(&req->req_comp);
kref_get(&req->refcount);
cookie = dmaengine_submit(tx);
@@ -831,13 +828,20 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode,
if (!req)
return -ENOMEM;
- kref_init(&req->refcount);
-
ret = get_dma_channel(priv);
if (ret) {
kfree(req);
return ret;
}
+ chan = priv->dmach;
+
+ kref_init(&req->refcount);
+ init_completion(&req->req_comp);
+ req->dir = dir;
+ req->filp = filp;
+ req->priv = priv;
+ req->dmach = chan;
+ req->sync = sync;
/*
* If parameter loc_addr != NULL, we are transferring data from/to
@@ -925,11 +929,6 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode,
xfer->offset, xfer->length);
}
- req->dir = dir;
- req->filp = filp;
- req->priv = priv;
- chan = priv->dmach;
-
nents = dma_map_sg(chan->device->dev,
req->sgt.sgl, req->sgt.nents, dir);
if (nents == 0) {
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index 62f5f04d8f61..5e963fe0e38d 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -592,13 +592,22 @@ static int _schedule_lcu_update(struct alias_lcu *lcu,
int dasd_alias_add_device(struct dasd_device *device)
{
struct dasd_eckd_private *private = device->private;
- struct alias_lcu *lcu;
+ __u8 uaddr = private->uid.real_unit_addr;
+ struct alias_lcu *lcu = private->lcu;
unsigned long flags;
int rc;
- lcu = private->lcu;
rc = 0;
spin_lock_irqsave(&lcu->lock, flags);
+ /*
+ * Check if device and lcu type differ. If so, the uac data may be
+ * outdated and needs to be updated.
+ */
+ if (private->uid.type != lcu->uac->unit[uaddr].ua_type) {
+ lcu->flags |= UPDATE_PENDING;
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+ "uid type mismatch - trigger rescan");
+ }
if (!(lcu->flags & UPDATE_PENDING)) {
rc = _add_device_to_lcu(lcu, device, device);
if (rc)
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index f035c2f25d35..131f1989f6f3 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -27,7 +27,6 @@
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/vtoc.h>
-#include <asm/diag.h>
#include "dasd_int.h"
#include "dasd_diag.h"
diff --git a/drivers/s390/char/sclp_early_core.c b/drivers/s390/char/sclp_early_core.c
index 5f8d9ea69ebd..eceba3858cef 100644
--- a/drivers/s390/char/sclp_early_core.c
+++ b/drivers/s390/char/sclp_early_core.c
@@ -18,7 +18,7 @@ int sclp_init_state __section(.data) = sclp_init_state_uninitialized;
* Used to keep track of the size of the event masks. Qemu until version 2.11
* only supports 4 and needs a workaround.
*/
-bool sclp_mask_compat_mode;
+bool sclp_mask_compat_mode __section(.data);
void sclp_early_wait_irq(void)
{
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 6652a49a49b1..9029804dcd22 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -452,6 +452,7 @@ static void chsc_process_sei_link_incident(struct chsc_sei_nt0_area *sei_area)
static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area)
{
+ struct channel_path *chp;
struct chp_link link;
struct chp_id chpid;
int status;
@@ -464,10 +465,17 @@ static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area)
chpid.id = sei_area->rsid;
/* allocate a new channel path structure, if needed */
status = chp_get_status(chpid);
- if (status < 0)
- chp_new(chpid);
- else if (!status)
+ if (!status)
return;
+
+ if (status < 0) {
+ chp_new(chpid);
+ } else {
+ chp = chpid_to_chp(chpid);
+ mutex_lock(&chp->lock);
+ chp_update_desc(chp);
+ mutex_unlock(&chp->lock);
+ }
memset(&link, 0, sizeof(struct chp_link));
link.chpid = chpid;
if ((sei_area->vf & 0xc0) != 0) {
diff --git a/drivers/s390/cio/vfio_ccw_fsm.c b/drivers/s390/cio/vfio_ccw_fsm.c
index ff6963ad6e39..3c800642134e 100644
--- a/drivers/s390/cio/vfio_ccw_fsm.c
+++ b/drivers/s390/cio/vfio_ccw_fsm.c
@@ -20,12 +20,12 @@ static int fsm_io_helper(struct vfio_ccw_private *private)
int ccode;
__u8 lpm;
unsigned long flags;
+ int ret;
sch = private->sch;
spin_lock_irqsave(sch->lock, flags);
private->state = VFIO_CCW_STATE_BUSY;
- spin_unlock_irqrestore(sch->lock, flags);
orb = cp_get_orb(&private->cp, (u32)(addr_t)sch, sch->lpm);
@@ -38,10 +38,12 @@ static int fsm_io_helper(struct vfio_ccw_private *private)
* Initialize device status information
*/
sch->schib.scsw.cmd.actl |= SCSW_ACTL_START_PEND;
- return 0;
+ ret = 0;
+ break;
case 1: /* Status pending */
case 2: /* Busy */
- return -EBUSY;
+ ret = -EBUSY;
+ break;
case 3: /* Device/path not operational */
{
lpm = orb->cmd.lpm;
@@ -51,13 +53,16 @@ static int fsm_io_helper(struct vfio_ccw_private *private)
sch->lpm = 0;
if (cio_update_schib(sch))
- return -ENODEV;
-
- return sch->lpm ? -EACCES : -ENODEV;
+ ret = -ENODEV;
+ else
+ ret = sch->lpm ? -EACCES : -ENODEV;
+ break;
}
default:
- return ccode;
+ ret = ccode;
}
+ spin_unlock_irqrestore(sch->lock, flags);
+ return ret;
}
static void fsm_notoper(struct vfio_ccw_private *private,
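
fsm_io_helper() previously dropped sch->lock right after setting the state and then returned from inside the switch; the rework holds the lock across the ssch handling and funnels every case through a single unlock. The shape of that fix, detached from the CCW details (compile with -pthread; -16 stands in for -EBUSY):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int start_io(int ccode)
{
	int ret;

	pthread_mutex_lock(&lock);
	switch (ccode) {
	case 0:
		ret = 0;	/* started */
		break;
	case 1:			/* status pending */
	case 2:			/* busy */
		ret = -16;	/* -EBUSY */
		break;
	default:
		ret = ccode;
	}
	pthread_mutex_unlock(&lock);	/* one unlock for every path */
	return ret;
}

int main(void)
{
	printf("%d %d\n", start_io(0), start_io(2));	/* 0 -16 */
	return 0;
}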
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 4326715dc13e..78b98b3e7efa 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -557,7 +557,6 @@ enum qeth_prot_versions {
enum qeth_cmd_buffer_state {
BUF_STATE_FREE,
BUF_STATE_LOCKED,
- BUF_STATE_PROCESSED,
};
enum qeth_cq {
@@ -601,7 +600,6 @@ struct qeth_channel {
struct qeth_cmd_buffer iob[QETH_CMD_BUFFER_NO];
atomic_t irq_pending;
int io_buf_no;
- int buf_no;
};
/**
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 04fefa5bb08d..dffd820731f2 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -706,7 +706,6 @@ void qeth_clear_ipacmd_list(struct qeth_card *card)
qeth_put_reply(reply);
}
spin_unlock_irqrestore(&card->lock, flags);
- atomic_set(&card->write.irq_pending, 0);
}
EXPORT_SYMBOL_GPL(qeth_clear_ipacmd_list);
@@ -818,7 +817,6 @@ void qeth_clear_cmd_buffers(struct qeth_channel *channel)
for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++)
qeth_release_buffer(channel, &channel->iob[cnt]);
- channel->buf_no = 0;
channel->io_buf_no = 0;
}
EXPORT_SYMBOL_GPL(qeth_clear_cmd_buffers);
@@ -924,7 +922,6 @@ static int qeth_setup_channel(struct qeth_channel *channel)
kfree(channel->iob[cnt].data);
return -ENOMEM;
}
- channel->buf_no = 0;
channel->io_buf_no = 0;
atomic_set(&channel->irq_pending, 0);
spin_lock_init(&channel->iob_lock);
@@ -1100,16 +1097,9 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
{
int rc;
int cstat, dstat;
- struct qeth_cmd_buffer *buffer;
+ struct qeth_cmd_buffer *iob = NULL;
struct qeth_channel *channel;
struct qeth_card *card;
- struct qeth_cmd_buffer *iob;
- __u8 index;
-
- if (__qeth_check_irb_error(cdev, intparm, irb))
- return;
- cstat = irb->scsw.cmd.cstat;
- dstat = irb->scsw.cmd.dstat;
card = CARD_FROM_CDEV(cdev);
if (!card)
@@ -1127,6 +1117,19 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
channel = &card->data;
QETH_CARD_TEXT(card, 5, "data");
}
+
+ if (qeth_intparm_is_iob(intparm))
+ iob = (struct qeth_cmd_buffer *) __va((addr_t)intparm);
+
+ if (__qeth_check_irb_error(cdev, intparm, irb)) {
+ /* IO was terminated, free its resources. */
+ if (iob)
+ qeth_release_buffer(iob->channel, iob);
+ atomic_set(&channel->irq_pending, 0);
+ wake_up(&card->wait_q);
+ return;
+ }
+
atomic_set(&channel->irq_pending, 0);
if (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC))
@@ -1150,6 +1153,10 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
/* we don't have to handle this further */
intparm = 0;
}
+
+ cstat = irb->scsw.cmd.cstat;
+ dstat = irb->scsw.cmd.dstat;
+
if ((dstat & DEV_STAT_UNIT_EXCEP) ||
(dstat & DEV_STAT_UNIT_CHECK) ||
(cstat)) {
@@ -1182,25 +1189,15 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
channel->state = CH_STATE_RCD_DONE;
goto out;
}
- if (intparm) {
- buffer = (struct qeth_cmd_buffer *) __va((addr_t)intparm);
- buffer->state = BUF_STATE_PROCESSED;
- }
if (channel == &card->data)
return;
if (channel == &card->read &&
channel->state == CH_STATE_UP)
__qeth_issue_next_read(card);
- iob = channel->iob;
- index = channel->buf_no;
- while (iob[index].state == BUF_STATE_PROCESSED) {
- if (iob[index].callback != NULL)
- iob[index].callback(channel, iob + index);
+ if (iob && iob->callback)
+ iob->callback(iob->channel, iob);
- index = (index + 1) % QETH_CMD_BUFFER_NO;
- }
- channel->buf_no = index;
out:
wake_up(&card->wait_q);
return;
@@ -1870,8 +1867,8 @@ static int qeth_idx_activate_get_answer(struct qeth_channel *channel,
atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
QETH_DBF_TEXT(SETUP, 6, "noirqpnd");
spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
- rc = ccw_device_start(channel->ccwdev,
- &channel->ccw, (addr_t) iob, 0, 0);
+ rc = ccw_device_start_timeout(channel->ccwdev, &channel->ccw,
+ (addr_t) iob, 0, 0, QETH_TIMEOUT);
spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
if (rc) {
@@ -1888,7 +1885,6 @@ static int qeth_idx_activate_get_answer(struct qeth_channel *channel,
if (channel->state != CH_STATE_UP) {
rc = -ETIME;
QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
- qeth_clear_cmd_buffers(channel);
} else
rc = 0;
return rc;
@@ -1942,8 +1938,8 @@ static int qeth_idx_activate_channel(struct qeth_channel *channel,
atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
QETH_DBF_TEXT(SETUP, 6, "noirqpnd");
spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
- rc = ccw_device_start(channel->ccwdev,
- &channel->ccw, (addr_t) iob, 0, 0);
+ rc = ccw_device_start_timeout(channel->ccwdev, &channel->ccw,
+ (addr_t) iob, 0, 0, QETH_TIMEOUT);
spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
if (rc) {
@@ -1964,7 +1960,6 @@ static int qeth_idx_activate_channel(struct qeth_channel *channel,
QETH_DBF_MESSAGE(2, "%s IDX activate timed out\n",
dev_name(&channel->ccwdev->dev));
QETH_DBF_TEXT_(SETUP, 2, "2err%d", -ETIME);
- qeth_clear_cmd_buffers(channel);
return -ETIME;
}
return qeth_idx_activate_get_answer(channel, idx_reply_cb);
@@ -2166,8 +2161,8 @@ int qeth_send_control_data(struct qeth_card *card, int len,
QETH_CARD_TEXT(card, 6, "noirqpnd");
spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags);
- rc = ccw_device_start(card->write.ccwdev, &card->write.ccw,
- (addr_t) iob, 0, 0);
+ rc = ccw_device_start_timeout(CARD_WDEV(card), &card->write.ccw,
+ (addr_t) iob, 0, 0, event_timeout);
spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags);
if (rc) {
QETH_DBF_MESSAGE(2, "%s qeth_send_control_data: "
@@ -2199,8 +2194,6 @@ int qeth_send_control_data(struct qeth_card *card, int len,
}
}
- if (reply->rc == -EIO)
- goto error;
rc = reply->rc;
qeth_put_reply(reply);
return rc;
@@ -2211,10 +2204,6 @@ time_err:
list_del_init(&reply->list);
spin_unlock_irqrestore(&reply->card->lock, flags);
atomic_inc(&reply->received);
-error:
- atomic_set(&card->write.irq_pending, 0);
- qeth_release_buffer(iob->channel, iob);
- card->write.buf_no = (card->write.buf_no + 1) % QETH_CMD_BUFFER_NO;
rc = reply->rc;
qeth_put_reply(reply);
return rc;
@@ -3033,28 +3022,23 @@ static int qeth_send_startlan(struct qeth_card *card)
return rc;
}
-static int qeth_default_setadapterparms_cb(struct qeth_card *card,
- struct qeth_reply *reply, unsigned long data)
+static int qeth_setadpparms_inspect_rc(struct qeth_ipa_cmd *cmd)
{
- struct qeth_ipa_cmd *cmd;
-
- QETH_CARD_TEXT(card, 4, "defadpcb");
-
- cmd = (struct qeth_ipa_cmd *) data;
- if (cmd->hdr.return_code == 0)
+ if (!cmd->hdr.return_code)
cmd->hdr.return_code =
cmd->data.setadapterparms.hdr.return_code;
- return 0;
+ return cmd->hdr.return_code;
}
static int qeth_query_setadapterparms_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
{
- struct qeth_ipa_cmd *cmd;
+ struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
QETH_CARD_TEXT(card, 3, "quyadpcb");
+ if (qeth_setadpparms_inspect_rc(cmd))
+ return 0;
- cmd = (struct qeth_ipa_cmd *) data;
if (cmd->data.setadapterparms.data.query_cmds_supp.lan_type & 0x7f) {
card->info.link_type =
cmd->data.setadapterparms.data.query_cmds_supp.lan_type;
@@ -3062,7 +3046,7 @@ static int qeth_query_setadapterparms_cb(struct qeth_card *card,
}
card->options.adp.supported_funcs =
cmd->data.setadapterparms.data.query_cmds_supp.supported_cmds;
- return qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd);
+ return 0;
}
static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card,
@@ -3154,22 +3138,20 @@ EXPORT_SYMBOL_GPL(qeth_query_ipassists);
static int qeth_query_switch_attributes_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
{
- struct qeth_ipa_cmd *cmd;
- struct qeth_switch_info *sw_info;
+ struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
struct qeth_query_switch_attributes *attrs;
+ struct qeth_switch_info *sw_info;
QETH_CARD_TEXT(card, 2, "qswiatcb");
- cmd = (struct qeth_ipa_cmd *) data;
- sw_info = (struct qeth_switch_info *)reply->param;
- if (cmd->data.setadapterparms.hdr.return_code == 0) {
- attrs = &cmd->data.setadapterparms.data.query_switch_attributes;
- sw_info->capabilities = attrs->capabilities;
- sw_info->settings = attrs->settings;
- QETH_CARD_TEXT_(card, 2, "%04x%04x", sw_info->capabilities,
- sw_info->settings);
- }
- qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
+ if (qeth_setadpparms_inspect_rc(cmd))
+ return 0;
+ sw_info = (struct qeth_switch_info *)reply->param;
+ attrs = &cmd->data.setadapterparms.data.query_switch_attributes;
+ sw_info->capabilities = attrs->capabilities;
+ sw_info->settings = attrs->settings;
+ QETH_CARD_TEXT_(card, 2, "%04x%04x", sw_info->capabilities,
+ sw_info->settings);
return 0;
}
@@ -4207,16 +4189,13 @@ EXPORT_SYMBOL_GPL(qeth_do_send_packet);
static int qeth_setadp_promisc_mode_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
{
- struct qeth_ipa_cmd *cmd;
+ struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
struct qeth_ipacmd_setadpparms *setparms;
QETH_CARD_TEXT(card, 4, "prmadpcb");
- cmd = (struct qeth_ipa_cmd *) data;
setparms = &(cmd->data.setadapterparms);
-
- qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd);
- if (cmd->hdr.return_code) {
+ if (qeth_setadpparms_inspect_rc(cmd)) {
QETH_CARD_TEXT_(card, 4, "prmrc%x", cmd->hdr.return_code);
setparms->data.mode = SET_PROMISC_MODE_OFF;
}
@@ -4286,18 +4265,18 @@ EXPORT_SYMBOL_GPL(qeth_get_stats);
static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
{
- struct qeth_ipa_cmd *cmd;
+ struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
QETH_CARD_TEXT(card, 4, "chgmaccb");
+ if (qeth_setadpparms_inspect_rc(cmd))
+ return 0;
- cmd = (struct qeth_ipa_cmd *) data;
if (!card->options.layer2 ||
!(card->info.mac_bits & QETH_LAYER2_MAC_READ)) {
ether_addr_copy(card->dev->dev_addr,
cmd->data.setadapterparms.data.change_addr.addr);
card->info.mac_bits |= QETH_LAYER2_MAC_READ;
}
- qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
return 0;
}
@@ -4328,13 +4307,15 @@ EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr);
static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
{
- struct qeth_ipa_cmd *cmd;
+ struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
struct qeth_set_access_ctrl *access_ctrl_req;
int fallback = *(int *)reply->param;
QETH_CARD_TEXT(card, 4, "setaccb");
+ if (cmd->hdr.return_code)
+ return 0;
+ qeth_setadpparms_inspect_rc(cmd);
- cmd = (struct qeth_ipa_cmd *) data;
access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
QETH_DBF_TEXT_(SETUP, 2, "setaccb");
QETH_DBF_TEXT_(SETUP, 2, "%s", card->gdev->dev.kobj.name);
@@ -4407,7 +4388,6 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
card->options.isolation = card->options.prev_isolation;
break;
}
- qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
return 0;
}
@@ -4695,14 +4675,15 @@ out:
static int qeth_setadpparms_query_oat_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
{
- struct qeth_ipa_cmd *cmd;
+ struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
struct qeth_qoat_priv *priv;
char *resdata;
int resdatalen;
QETH_CARD_TEXT(card, 3, "qoatcb");
+ if (qeth_setadpparms_inspect_rc(cmd))
+ return 0;
- cmd = (struct qeth_ipa_cmd *)data;
priv = (struct qeth_qoat_priv *)reply->param;
resdatalen = cmd->data.setadapterparms.hdr.cmdlength;
resdata = (char *)data + 28;
@@ -4796,21 +4777,18 @@ out:
static int qeth_query_card_info_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
{
- struct qeth_ipa_cmd *cmd;
+ struct carrier_info *carrier_info = (struct carrier_info *)reply->param;
+ struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
struct qeth_query_card_info *card_info;
- struct carrier_info *carrier_info;
QETH_CARD_TEXT(card, 2, "qcrdincb");
- carrier_info = (struct carrier_info *)reply->param;
- cmd = (struct qeth_ipa_cmd *)data;
- card_info = &cmd->data.setadapterparms.data.card_info;
- if (cmd->data.setadapterparms.hdr.return_code == 0) {
- carrier_info->card_type = card_info->card_type;
- carrier_info->port_mode = card_info->port_mode;
- carrier_info->port_speed = card_info->port_speed;
- }
+ if (qeth_setadpparms_inspect_rc(cmd))
+ return 0;
- qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
+ card_info = &cmd->data.setadapterparms.data.card_info;
+ carrier_info->card_type = card_info->card_type;
+ carrier_info->port_mode = card_info->port_mode;
+ carrier_info->port_speed = card_info->port_speed;
return 0;
}
@@ -4857,7 +4835,7 @@ int qeth_vm_request_mac(struct qeth_card *card)
goto out;
}
- ccw_device_get_id(CARD_DDEV(card), &id);
+ ccw_device_get_id(CARD_RDEV(card), &id);
request->resp_buf_len = sizeof(*response);
request->resp_version = DIAG26C_VERSION2;
request->op_code = DIAG26C_GET_MAC;
@@ -6563,10 +6541,14 @@ static int __init qeth_core_init(void)
mutex_init(&qeth_mod_mutex);
qeth_wq = create_singlethread_workqueue("qeth_wq");
+ if (!qeth_wq) {
+ rc = -ENOMEM;
+ goto out_err;
+ }
rc = qeth_register_dbf_views();
if (rc)
- goto out_err;
+ goto dbf_err;
qeth_core_root_dev = root_device_register("qeth");
rc = PTR_ERR_OR_ZERO(qeth_core_root_dev);
if (rc)
@@ -6603,6 +6585,8 @@ slab_err:
root_device_unregister(qeth_core_root_dev);
register_err:
qeth_unregister_dbf_views();
+dbf_err:
+ destroy_workqueue(qeth_wq);
out_err:
pr_err("Initializing the qeth device driver failed\n");
return rc;
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index 619f897b4bb0..f4d1ec0b8f5a 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -35,6 +35,18 @@ extern unsigned char IPA_PDU_HEADER[];
#define QETH_HALT_CHANNEL_PARM -11
#define QETH_RCD_PARM -12
+static inline bool qeth_intparm_is_iob(unsigned long intparm)
+{
+ switch (intparm) {
+ case QETH_CLEAR_CHANNEL_PARM:
+ case QETH_HALT_CHANNEL_PARM:
+ case QETH_RCD_PARM:
+ case 0:
+ return false;
+ }
+ return true;
+}
+
/*****************************************************************************/
/* IP Assist related definitions */
/*****************************************************************************/
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 50a313806dde..b8079f2a65b3 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -21,7 +21,6 @@
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/hashtable.h>
-#include <linux/string.h>
#include <asm/setup.h>
#include "qeth_core.h"
#include "qeth_l2.h"
@@ -122,13 +121,10 @@ static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac)
QETH_CARD_TEXT(card, 2, "L2Setmac");
rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC);
if (rc == 0) {
- card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
- ether_addr_copy(card->dev->dev_addr, mac);
dev_info(&card->gdev->dev,
- "MAC address %pM successfully registered on device %s\n",
- card->dev->dev_addr, card->dev->name);
+ "MAC address %pM successfully registered on device %s\n",
+ mac, card->dev->name);
} else {
- card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
switch (rc) {
case -EEXIST:
dev_warn(&card->gdev->dev,
@@ -143,19 +139,6 @@ static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac)
return rc;
}
-static int qeth_l2_send_delmac(struct qeth_card *card, __u8 *mac)
-{
- int rc;
-
- QETH_CARD_TEXT(card, 2, "L2Delmac");
- if (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))
- return 0;
- rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELVMAC);
- if (rc == 0)
- card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
- return rc;
-}
-
static int qeth_l2_write_mac(struct qeth_card *card, u8 *mac)
{
enum qeth_ipa_cmds cmd = is_multicast_ether_addr_64bits(mac) ?
@@ -520,6 +503,7 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
{
struct sockaddr *addr = p;
struct qeth_card *card = dev->ml_priv;
+ u8 old_addr[ETH_ALEN];
int rc = 0;
QETH_CARD_TEXT(card, 3, "setmac");
@@ -531,14 +515,35 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
return -EOPNOTSUPP;
}
QETH_CARD_HEX(card, 3, addr->sa_data, ETH_ALEN);
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
QETH_CARD_TEXT(card, 3, "setmcREC");
return -ERESTARTSYS;
}
- rc = qeth_l2_send_delmac(card, &card->dev->dev_addr[0]);
- if (!rc || (rc == -ENOENT))
- rc = qeth_l2_send_setmac(card, addr->sa_data);
- return rc ? -EINVAL : 0;
+
+ if (!qeth_card_hw_is_reachable(card)) {
+ ether_addr_copy(dev->dev_addr, addr->sa_data);
+ return 0;
+ }
+
+ /* don't register the same address twice */
+ if (ether_addr_equal_64bits(dev->dev_addr, addr->sa_data) &&
+ (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))
+ return 0;
+
+ /* add the new address, switch over, drop the old */
+ rc = qeth_l2_send_setmac(card, addr->sa_data);
+ if (rc)
+ return rc;
+ ether_addr_copy(old_addr, dev->dev_addr);
+ ether_addr_copy(dev->dev_addr, addr->sa_data);
+
+ if (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED)
+ qeth_l2_remove_mac(card, old_addr);
+ card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
+ return 0;
}
static void qeth_promisc_to_bridge(struct qeth_card *card)
@@ -1068,8 +1073,9 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
goto out_remove;
}
- if (card->info.type != QETH_CARD_TYPE_OSN)
- qeth_l2_send_setmac(card, &card->dev->dev_addr[0]);
+ if (card->info.type != QETH_CARD_TYPE_OSN &&
+ !qeth_l2_send_setmac(card, card->dev->dev_addr))
+ card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
if (qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP)) {
if (card->info.hwtrap &&
@@ -1339,8 +1345,8 @@ static int qeth_osn_send_control_data(struct qeth_card *card, int len,
qeth_prepare_control_data(card, len, iob);
QETH_CARD_TEXT(card, 6, "osnoirqp");
spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags);
- rc = ccw_device_start(card->write.ccwdev, &card->write.ccw,
- (addr_t) iob, 0, 0);
+ rc = ccw_device_start_timeout(CARD_WDEV(card), &card->write.ccw,
+ (addr_t) iob, 0, 0, QETH_IPA_TIMEOUT);
spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags);
if (rc) {
QETH_DBF_MESSAGE(2, "qeth_osn_send_control_data: "
diff --git a/drivers/s390/net/smsgiucv.c b/drivers/s390/net/smsgiucv.c
index 3b0c8b8a7634..066b5c3aaae6 100644
--- a/drivers/s390/net/smsgiucv.c
+++ b/drivers/s390/net/smsgiucv.c
@@ -176,7 +176,7 @@ static struct device_driver smsg_driver = {
static void __exit smsg_exit(void)
{
- cpcmd("SET SMSG IUCV", NULL, 0, NULL);
+ cpcmd("SET SMSG OFF", NULL, 0, NULL);
device_unregister(smsg_dev);
iucv_unregister(&smsg_handler, 1);
driver_unregister(&smsg_driver);
diff --git a/drivers/scsi/fnic/fnic_trace.c b/drivers/scsi/fnic/fnic_trace.c
index abddde11982b..98597b59c12a 100644
--- a/drivers/scsi/fnic/fnic_trace.c
+++ b/drivers/scsi/fnic/fnic_trace.c
@@ -296,7 +296,7 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
"Number of Abort FW Timeouts: %lld\n"
"Number of Abort IO NOT Found: %lld\n"
- "Abord issued times: \n"
+ "Abort issued times: \n"
" < 6 sec : %lld\n"
" 6 sec - 20 sec : %lld\n"
" 20 sec - 30 sec : %lld\n"
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index ce97cde3b41c..f4d988dd1e9d 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -1124,12 +1124,12 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
goto fail_fw_init;
}
- ret = 0;
+ return 0;
fail_fw_init:
dev_err(&instance->pdev->dev,
- "Init cmd return status %s for SCSI host %d\n",
- ret ? "FAILED" : "SUCCESS", instance->host->host_no);
+ "Init cmd return status FAILED for SCSI host %d\n",
+ instance->host->host_no);
return ret;
}
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 9ef5e3b810f6..656c98e116a9 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -234,11 +234,13 @@ static const char *sdebug_version_date = "20180128";
#define F_INV_OP 0x200
#define F_FAKE_RW 0x400
#define F_M_ACCESS 0x800 /* media access */
-#define F_LONG_DELAY 0x1000
+#define F_SSU_DELAY 0x1000
+#define F_SYNC_DELAY 0x2000
#define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
#define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
#define FF_SA (F_SA_HIGH | F_SA_LOW)
+#define F_LONG_DELAY (F_SSU_DELAY | F_SYNC_DELAY)
#define SDEBUG_MAX_PARTS 4
@@ -510,7 +512,7 @@ static const struct opcode_info_t release_iarr[] = {
};
static const struct opcode_info_t sync_cache_iarr[] = {
- {0, 0x91, 0, F_LONG_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
+ {0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
{16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* SYNC_CACHE (16) */
};
@@ -553,7 +555,7 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
resp_write_dt0, write_iarr, /* WRITE(16) */
{16, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
- {0, 0x1b, 0, F_LONG_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
+ {0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
{6, 0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
{ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
@@ -606,7 +608,7 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
resp_write_same_10, write_same_iarr, /* WRITE SAME(10) */
{10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
0, 0, 0, 0, 0} },
- {ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_LONG_DELAY | F_M_ACCESS,
+ {ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
resp_sync_cache, sync_cache_iarr,
{10, 0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
0, 0, 0, 0} }, /* SYNC_CACHE (10) */
@@ -667,6 +669,7 @@ static bool sdebug_strict = DEF_STRICT;
static bool sdebug_any_injecting_opt;
static bool sdebug_verbose;
static bool have_dif_prot;
+static bool write_since_sync;
static bool sdebug_statistics = DEF_STATISTICS;
static unsigned int sdebug_store_sectors;
@@ -1607,6 +1610,7 @@ static int resp_start_stop(struct scsi_cmnd *scp,
{
unsigned char *cmd = scp->cmnd;
int power_cond, stop;
+ bool changing;
power_cond = (cmd[4] & 0xf0) >> 4;
if (power_cond) {
@@ -1614,8 +1618,12 @@ static int resp_start_stop(struct scsi_cmnd *scp,
return check_condition_result;
}
stop = !(cmd[4] & 1);
+ changing = atomic_read(&devip->stopped) == !stop;
atomic_xchg(&devip->stopped, stop);
- return (cmd[1] & 0x1) ? SDEG_RES_IMMED_MASK : 0; /* check IMMED bit */
+ if (!changing || cmd[1] & 0x1) /* state unchanged or IMMED set */
+ return SDEG_RES_IMMED_MASK;
+ else
+ return 0;
}
static sector_t get_sdebug_capacity(void)
@@ -2473,6 +2481,7 @@ static int do_device_access(struct scsi_cmnd *scmd, u32 sg_skip, u64 lba,
if (do_write) {
sdb = scsi_out(scmd);
dir = DMA_TO_DEVICE;
+ write_since_sync = true;
} else {
sdb = scsi_in(scmd);
dir = DMA_FROM_DEVICE;
@@ -3583,6 +3592,7 @@ static int resp_get_lba_status(struct scsi_cmnd *scp,
static int resp_sync_cache(struct scsi_cmnd *scp,
struct sdebug_dev_info *devip)
{
+ int res = 0;
u64 lba;
u32 num_blocks;
u8 *cmd = scp->cmnd;
@@ -3598,7 +3608,11 @@ static int resp_sync_cache(struct scsi_cmnd *scp,
mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
return check_condition_result;
}
- return (cmd[1] & 0x2) ? SDEG_RES_IMMED_MASK : 0; /* check IMMED bit */
+ if (!write_since_sync || cmd[1] & 0x2)
+ res = SDEG_RES_IMMED_MASK;
+ else /* delay if write_since_sync and IMMED clear */
+ write_since_sync = false;
+ return res;
}
#define RL_BUCKET_ELEMS 8
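
Net effect of the scsi_debug hunks above: SYNC CACHE now completes immediately when the cache is already clean (no write since the last sync) or when the initiator set the IMMED bit, and only a dirty cache with IMMED clear takes the delayed path. A minimal sketch of that policy; write_since_sync mirrors the driver's flag, and the mask value is a placeholder for this sketch:

        /* Sketch of resp_sync_cache()'s return policy; not driver code. */
        #define SDEG_RES_IMMED_MASK 0x40000000  /* placeholder value */

        static bool write_since_sync;   /* set by do_device_access() on writes */

        static int sync_cache_result(int immed_bit)
        {
                if (!write_since_sync || immed_bit)
                        return SDEG_RES_IMMED_MASK;     /* respond without delay */
                write_since_sync = false;               /* cache clean again... */
                return 0;                               /* ...after the long delay */
        }
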
@@ -5777,13 +5791,14 @@ fini:
return schedule_resp(scp, devip, errsts, pfp, 0, 0);
else if ((sdebug_jdelay || sdebug_ndelay) && (flags & F_LONG_DELAY)) {
/*
- * If any delay is active, want F_LONG_DELAY to be at least 1
+ * If any delay is active, for F_SSU_DELAY want at least 1
* second and if sdebug_jdelay>0 want a long delay of that
- * many seconds.
+ * many seconds; for F_SYNC_DELAY want 1/20 of that.
*/
int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
+ int denom = (flags & F_SYNC_DELAY) ? 20 : 1;
- jdelay = mult_frac(USER_HZ * jdelay, HZ, USER_HZ);
+ jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
} else
return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
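
In the delay expression itself USER_HZ cancels out, leaving roughly jdelay * HZ / denom jiffies, so SYNC CACHE waits one twentieth of the START STOP UNIT delay. A standalone demo, assuming hypothetical build values HZ=250 and USER_HZ=100:

        /* Demo of the scaled delay; mult_frac here matches the kernel's
         * semantics of x * n / d without intermediate overflow. */
        #include <stdio.h>

        #define HZ      250
        #define USER_HZ 100
        #define mult_frac(x, n, d) \
                (((x) / (d)) * (n) + (((x) % (d)) * (n)) / (d))

        int main(void)
        {
                int sdebug_jdelay = 5;
                int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
                int ssu  = mult_frac(USER_HZ * jdelay, HZ,  1 * USER_HZ);
                int sync = mult_frac(USER_HZ * jdelay, HZ, 20 * USER_HZ);

                /* Prints 1250 and 62: SYNC CACHE gets 1/20 of the SSU delay. */
                printf("SSU=%d jiffies, SYNC_CACHE=%d jiffies\n", ssu, sync);
                return 0;
        }
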
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index f4b52b44b966..65f6c94f2e9b 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -2322,6 +2322,12 @@ iscsi_multicast_skb(struct sk_buff *skb, uint32_t group, gfp_t gfp)
return nlmsg_multicast(nls, skb, 0, group, gfp);
}
+static int
+iscsi_unicast_skb(struct sk_buff *skb, u32 portid)
+{
+ return nlmsg_unicast(nls, skb, portid);
+}
+
int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
char *data, uint32_t data_size)
{
@@ -2524,14 +2530,11 @@ void iscsi_ping_comp_event(uint32_t host_no, struct iscsi_transport *transport,
EXPORT_SYMBOL_GPL(iscsi_ping_comp_event);
static int
-iscsi_if_send_reply(uint32_t group, int seq, int type, int done, int multi,
- void *payload, int size)
+iscsi_if_send_reply(u32 portid, int type, void *payload, int size)
{
struct sk_buff *skb;
struct nlmsghdr *nlh;
int len = nlmsg_total_size(size);
- int flags = multi ? NLM_F_MULTI : 0;
- int t = done ? NLMSG_DONE : type;
skb = alloc_skb(len, GFP_ATOMIC);
if (!skb) {
@@ -2539,10 +2542,9 @@ iscsi_if_send_reply(uint32_t group, int seq, int type, int done, int multi,
return -ENOMEM;
}
- nlh = __nlmsg_put(skb, 0, 0, t, (len - sizeof(*nlh)), 0);
- nlh->nlmsg_flags = flags;
+ nlh = __nlmsg_put(skb, 0, 0, type, (len - sizeof(*nlh)), 0);
memcpy(nlmsg_data(nlh), payload, size);
- return iscsi_multicast_skb(skb, group, GFP_ATOMIC);
+ return iscsi_unicast_skb(skb, portid);
}
static int
@@ -3470,6 +3472,7 @@ static int
iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
{
int err = 0;
+ u32 portid;
struct iscsi_uevent *ev = nlmsg_data(nlh);
struct iscsi_transport *transport = NULL;
struct iscsi_internal *priv;
@@ -3490,10 +3493,12 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
if (!try_module_get(transport->owner))
return -EINVAL;
+ portid = NETLINK_CB(skb).portid;
+
switch (nlh->nlmsg_type) {
case ISCSI_UEVENT_CREATE_SESSION:
err = iscsi_if_create_session(priv, ep, ev,
- NETLINK_CB(skb).portid,
+ portid,
ev->u.c_session.initial_cmdsn,
ev->u.c_session.cmds_max,
ev->u.c_session.queue_depth);
@@ -3506,7 +3511,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
}
err = iscsi_if_create_session(priv, ep, ev,
- NETLINK_CB(skb).portid,
+ portid,
ev->u.c_bound_session.initial_cmdsn,
ev->u.c_bound_session.cmds_max,
ev->u.c_bound_session.queue_depth);
@@ -3664,6 +3669,8 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
static void
iscsi_if_rx(struct sk_buff *skb)
{
+ u32 portid = NETLINK_CB(skb).portid;
+
mutex_lock(&rx_queue_mutex);
while (skb->len >= NLMSG_HDRLEN) {
int err;
@@ -3699,8 +3706,8 @@ iscsi_if_rx(struct sk_buff *skb)
break;
if (ev->type == ISCSI_UEVENT_GET_CHAP && !err)
break;
- err = iscsi_if_send_reply(group, nlh->nlmsg_seq,
- nlh->nlmsg_type, 0, 0, ev, sizeof(*ev));
+ err = iscsi_if_send_reply(portid, nlh->nlmsg_type,
+ ev, sizeof(*ev));
} while (err < 0 && err != -ECONNREFUSED && err != -ESRCH);
skb_pull(skb, rlen);
}
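
The iscsi change replies only to the socket that issued the request: the portid captured from NETLINK_CB(skb) at receive time identifies the sender, and nlmsg_unicast() replaces the old NLM_F_MULTI multicast. A condensed sketch of the reply path (error logging elided; nls stands for the transport's netlink socket as in the file above):

        /* Hedged sketch of the unicast reply pattern; not the exact driver code. */
        static int send_uevent_reply(u32 portid, int type, void *payload, int size)
        {
                struct sk_buff *skb = alloc_skb(nlmsg_total_size(size), GFP_ATOMIC);
                struct nlmsghdr *nlh;

                if (!skb)
                        return -ENOMEM;
                nlh = __nlmsg_put(skb, 0, 0, type, size, 0);
                memcpy(nlmsg_data(nlh), payload, size);
                return nlmsg_unicast(nls, skb, portid); /* one receiver, not a group */
        }
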
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index a6201e696ab9..9421d9877730 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2121,6 +2121,8 @@ sd_spinup_disk(struct scsi_disk *sdkp)
break; /* standby */
if (sshdr.asc == 4 && sshdr.ascq == 0xc)
break; /* unavailable */
+ if (sshdr.asc == 4 && sshdr.ascq == 0x1b)
+ break; /* sanitize in progress */
/*
* Issue command to spin up drive when not ready
*/
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
index 41df75eea57b..210407cd2341 100644
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -400,8 +400,10 @@ static int sd_zbc_check_capacity(struct scsi_disk *sdkp, unsigned char *buf)
*
* Check that all zones of the device are equal. The last zone can however
* be smaller. The zone size must also be a power of two number of LBAs.
+ *
+ * Returns the zone size in number of blocks upon success or an error code upon failure.
*/
-static int sd_zbc_check_zone_size(struct scsi_disk *sdkp)
+static s64 sd_zbc_check_zone_size(struct scsi_disk *sdkp)
{
u64 zone_blocks = 0;
sector_t block = 0;
@@ -412,8 +414,6 @@ static int sd_zbc_check_zone_size(struct scsi_disk *sdkp)
int ret;
u8 same;
- sdkp->zone_blocks = 0;
-
/* Get a buffer */
buf = kmalloc(SD_ZBC_BUF_SIZE, GFP_KERNEL);
if (!buf)
@@ -445,16 +445,17 @@ static int sd_zbc_check_zone_size(struct scsi_disk *sdkp)
/* Parse zone descriptors */
while (rec < buf + buf_len) {
- zone_blocks = get_unaligned_be64(&rec[8]);
- if (sdkp->zone_blocks == 0) {
- sdkp->zone_blocks = zone_blocks;
- } else if (zone_blocks != sdkp->zone_blocks &&
- (block + zone_blocks < sdkp->capacity
- || zone_blocks > sdkp->zone_blocks)) {
- zone_blocks = 0;
+ u64 this_zone_blocks = get_unaligned_be64(&rec[8]);
+
+ if (zone_blocks == 0) {
+ zone_blocks = this_zone_blocks;
+ } else if (this_zone_blocks != zone_blocks &&
+ (block + this_zone_blocks < sdkp->capacity
+ || this_zone_blocks > zone_blocks)) {
+ this_zone_blocks = 0;
goto out;
}
- block += zone_blocks;
+ block += this_zone_blocks;
rec += 64;
}
@@ -467,8 +468,6 @@ static int sd_zbc_check_zone_size(struct scsi_disk *sdkp)
} while (block < sdkp->capacity);
- zone_blocks = sdkp->zone_blocks;
-
out:
if (!zone_blocks) {
if (sdkp->first_scan)
@@ -488,8 +487,7 @@ out:
"Zone size too large\n");
ret = -ENODEV;
} else {
- sdkp->zone_blocks = zone_blocks;
- sdkp->zone_shift = ilog2(zone_blocks);
+ ret = zone_blocks;
}
out_free:
@@ -500,15 +498,14 @@ out_free:
/**
* sd_zbc_alloc_zone_bitmap - Allocate a zone bitmap (one bit per zone).
- * @sdkp: The disk of the bitmap
+ * @nr_zones: Number of zones to allocate space for.
+ * @numa_node: NUMA node to allocate the memory from.
*/
-static inline unsigned long *sd_zbc_alloc_zone_bitmap(struct scsi_disk *sdkp)
+static inline unsigned long *
+sd_zbc_alloc_zone_bitmap(u32 nr_zones, int numa_node)
{
- struct request_queue *q = sdkp->disk->queue;
-
- return kzalloc_node(BITS_TO_LONGS(sdkp->nr_zones)
- * sizeof(unsigned long),
- GFP_KERNEL, q->node);
+ return kzalloc_node(BITS_TO_LONGS(nr_zones) * sizeof(unsigned long),
+ GFP_KERNEL, numa_node);
}
/**
@@ -516,6 +513,7 @@ static inline unsigned long *sd_zbc_alloc_zone_bitmap(struct scsi_disk *sdkp)
* @sdkp: disk used
* @buf: report reply buffer
* @buflen: length of @buf
+ * @zone_shift: logarithm base 2 of the number of blocks in a zone
* @seq_zones_bitmap: bitmap of sequential zones to set
*
* Parse reported zone descriptors in @buf to identify sequential zones and
@@ -525,7 +523,7 @@ static inline unsigned long *sd_zbc_alloc_zone_bitmap(struct scsi_disk *sdkp)
* Return the LBA after the last zone reported.
*/
static sector_t sd_zbc_get_seq_zones(struct scsi_disk *sdkp, unsigned char *buf,
- unsigned int buflen,
+ unsigned int buflen, u32 zone_shift,
unsigned long *seq_zones_bitmap)
{
sector_t lba, next_lba = sdkp->capacity;
@@ -544,7 +542,7 @@ static sector_t sd_zbc_get_seq_zones(struct scsi_disk *sdkp, unsigned char *buf,
if (type != ZBC_ZONE_TYPE_CONV &&
cond != ZBC_ZONE_COND_READONLY &&
cond != ZBC_ZONE_COND_OFFLINE)
- set_bit(lba >> sdkp->zone_shift, seq_zones_bitmap);
+ set_bit(lba >> zone_shift, seq_zones_bitmap);
next_lba = lba + get_unaligned_be64(&rec[8]);
rec += 64;
}
@@ -553,12 +551,16 @@ static sector_t sd_zbc_get_seq_zones(struct scsi_disk *sdkp, unsigned char *buf,
}
/**
- * sd_zbc_setup_seq_zones_bitmap - Initialize the disk seq zone bitmap.
+ * sd_zbc_setup_seq_zones_bitmap - Initialize a seq zone bitmap.
* @sdkp: target disk
+ * @zone_shift: logarithm base 2 of the number of blocks in a zone
+ * @nr_zones: number of zones to set up a seq zone bitmap for
*
* Allocate a zone bitmap and initialize it by identifying sequential zones.
*/
-static int sd_zbc_setup_seq_zones_bitmap(struct scsi_disk *sdkp)
+static unsigned long *
+sd_zbc_setup_seq_zones_bitmap(struct scsi_disk *sdkp, u32 zone_shift,
+ u32 nr_zones)
{
struct request_queue *q = sdkp->disk->queue;
unsigned long *seq_zones_bitmap;
@@ -566,9 +568,9 @@ static int sd_zbc_setup_seq_zones_bitmap(struct scsi_disk *sdkp)
unsigned char *buf;
int ret = -ENOMEM;
- seq_zones_bitmap = sd_zbc_alloc_zone_bitmap(sdkp);
+ seq_zones_bitmap = sd_zbc_alloc_zone_bitmap(nr_zones, q->node);
if (!seq_zones_bitmap)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
buf = kmalloc(SD_ZBC_BUF_SIZE, GFP_KERNEL);
if (!buf)
@@ -579,7 +581,7 @@ static int sd_zbc_setup_seq_zones_bitmap(struct scsi_disk *sdkp)
if (ret)
goto out;
lba = sd_zbc_get_seq_zones(sdkp, buf, SD_ZBC_BUF_SIZE,
- seq_zones_bitmap);
+ zone_shift, seq_zones_bitmap);
}
if (lba != sdkp->capacity) {
@@ -591,12 +593,9 @@ out:
kfree(buf);
if (ret) {
kfree(seq_zones_bitmap);
- return ret;
+ return ERR_PTR(ret);
}
-
- q->seq_zones_bitmap = seq_zones_bitmap;
-
- return 0;
+ return seq_zones_bitmap;
}
static void sd_zbc_cleanup(struct scsi_disk *sdkp)
@@ -612,44 +611,64 @@ static void sd_zbc_cleanup(struct scsi_disk *sdkp)
q->nr_zones = 0;
}
-static int sd_zbc_setup(struct scsi_disk *sdkp)
+static int sd_zbc_setup(struct scsi_disk *sdkp, u32 zone_blocks)
{
struct request_queue *q = sdkp->disk->queue;
+ u32 zone_shift = ilog2(zone_blocks);
+ u32 nr_zones;
int ret;
- /* READ16/WRITE16 is mandatory for ZBC disks */
- sdkp->device->use_16_for_rw = 1;
- sdkp->device->use_10_for_rw = 0;
-
/* chunk_sectors indicates the zone size */
- blk_queue_chunk_sectors(sdkp->disk->queue,
- logical_to_sectors(sdkp->device, sdkp->zone_blocks));
- sdkp->nr_zones =
- round_up(sdkp->capacity, sdkp->zone_blocks) >> sdkp->zone_shift;
+ blk_queue_chunk_sectors(q,
+ logical_to_sectors(sdkp->device, zone_blocks));
+ nr_zones = round_up(sdkp->capacity, zone_blocks) >> zone_shift;
/*
* Initialize the device request queue information if the number
* of zones changed.
*/
- if (sdkp->nr_zones != q->nr_zones) {
-
- sd_zbc_cleanup(sdkp);
-
- q->nr_zones = sdkp->nr_zones;
- if (sdkp->nr_zones) {
- q->seq_zones_wlock = sd_zbc_alloc_zone_bitmap(sdkp);
- if (!q->seq_zones_wlock) {
+ if (nr_zones != sdkp->nr_zones || nr_zones != q->nr_zones) {
+ unsigned long *seq_zones_wlock = NULL, *seq_zones_bitmap = NULL;
+ size_t zone_bitmap_size;
+
+ if (nr_zones) {
+ seq_zones_wlock = sd_zbc_alloc_zone_bitmap(nr_zones,
+ q->node);
+ if (!seq_zones_wlock) {
ret = -ENOMEM;
goto err;
}
- ret = sd_zbc_setup_seq_zones_bitmap(sdkp);
- if (ret) {
- sd_zbc_cleanup(sdkp);
+ seq_zones_bitmap = sd_zbc_setup_seq_zones_bitmap(sdkp,
+ zone_shift, nr_zones);
+ if (IS_ERR(seq_zones_bitmap)) {
+ ret = PTR_ERR(seq_zones_bitmap);
+ kfree(seq_zones_wlock);
goto err;
}
}
-
+ zone_bitmap_size = BITS_TO_LONGS(nr_zones) *
+ sizeof(unsigned long);
+ blk_mq_freeze_queue(q);
+ if (q->nr_zones != nr_zones) {
+ /* READ16/WRITE16 is mandatory for ZBC disks */
+ sdkp->device->use_16_for_rw = 1;
+ sdkp->device->use_10_for_rw = 0;
+
+ sdkp->zone_blocks = zone_blocks;
+ sdkp->zone_shift = zone_shift;
+ sdkp->nr_zones = nr_zones;
+ q->nr_zones = nr_zones;
+ swap(q->seq_zones_wlock, seq_zones_wlock);
+ swap(q->seq_zones_bitmap, seq_zones_bitmap);
+ } else if (memcmp(q->seq_zones_bitmap, seq_zones_bitmap,
+ zone_bitmap_size) != 0) {
+ memcpy(q->seq_zones_bitmap, seq_zones_bitmap,
+ zone_bitmap_size);
+ }
+ blk_mq_unfreeze_queue(q);
+ kfree(seq_zones_wlock);
+ kfree(seq_zones_bitmap);
}
return 0;
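
The ordering in sd_zbc_setup() is the point of the rework: the new bitmaps are fully built before the queue is frozen, the pointers are published while no request can be in flight, and swap() leaves the previous (or unused) bitmaps in the local variables, so the unconditional kfree() calls after unfreezing release exactly the right memory. The publish step, reduced to its shape (sketch):

        blk_mq_freeze_queue(q);                 /* no requests in flight */
        swap(q->seq_zones_wlock, seq_zones_wlock);
        swap(q->seq_zones_bitmap, seq_zones_bitmap);
        blk_mq_unfreeze_queue(q);
        kfree(seq_zones_wlock);                 /* old pointer after the swap */
        kfree(seq_zones_bitmap);
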
@@ -661,6 +680,7 @@ err:
int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
{
+ int64_t zone_blocks;
int ret;
if (!sd_is_zoned(sdkp))
@@ -697,12 +717,16 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
* Check zone size: only devices with a constant zone size (except
* an eventual last runt zone) that is a power of 2 are supported.
*/
- ret = sd_zbc_check_zone_size(sdkp);
- if (ret)
+ zone_blocks = sd_zbc_check_zone_size(sdkp);
+ ret = -EFBIG;
+ if (zone_blocks != (u32)zone_blocks)
+ goto err;
+ ret = zone_blocks;
+ if (ret < 0)
goto err;
/* The drive satisfies the kernel restrictions: set it up */
- ret = sd_zbc_setup(sdkp);
+ ret = sd_zbc_setup(sdkp, zone_blocks);
if (ret)
goto err;
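
Folding the zone size into the return value means the caller must separate three cases: a negative errno, a size that will not fit the driver's u32 fields, and a usable size. A hedged restatement (checking the sign first for clarity; the hunk above tests the overflow case first):

        /* Sketch of consuming the new s64 return; names from the file above. */
        s64 zone_blocks = sd_zbc_check_zone_size(sdkp);

        if (zone_blocks < 0)
                return zone_blocks;             /* propagate the errno */
        if (zone_blocks != (u32)zone_blocks)
                return -EFBIG;                  /* does not fit in 32 bits */
        return sd_zbc_setup(sdkp, zone_blocks); /* safe u32 truncation */
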
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index c5b1bf1cadcb..00e79057f870 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -276,6 +276,35 @@ static inline void ufshcd_remove_non_printable(char *val)
*val = ' ';
}
+static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag,
+ const char *str)
+{
+ struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;
+
+ trace_ufshcd_upiu(dev_name(hba->dev), str, &rq->header, &rq->sc.cdb);
+}
+
+static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba, unsigned int tag,
+ const char *str)
+{
+ struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;
+
+ trace_ufshcd_upiu(dev_name(hba->dev), str, &rq->header, &rq->qr);
+}
+
+static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,
+ const char *str)
+{
+ struct utp_task_req_desc *descp;
+ struct utp_upiu_task_req *task_req;
+ int off = (int)tag - hba->nutrs;
+
+ descp = &hba->utmrdl_base_addr[off];
+ task_req = (struct utp_upiu_task_req *)descp->task_req_upiu;
+ trace_ufshcd_upiu(dev_name(hba->dev), str, &task_req->header,
+ &task_req->input_param1);
+}
+
static void ufshcd_add_command_trace(struct ufs_hba *hba,
unsigned int tag, const char *str)
{
@@ -285,6 +314,9 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba,
struct ufshcd_lrb *lrbp;
int transfer_len = -1;
+ /* trace UPIU also */
+ ufshcd_add_cmd_upiu_trace(hba, tag, str);
+
if (!trace_ufshcd_command_enabled())
return;
@@ -2550,6 +2582,7 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
hba->dev_cmd.complete = &wait;
+ ufshcd_add_query_upiu_trace(hba, tag, "query_send");
/* Make sure descriptors are ready before ringing the doorbell */
wmb();
spin_lock_irqsave(hba->host->host_lock, flags);
@@ -2559,6 +2592,9 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
+ ufshcd_add_query_upiu_trace(hba, tag,
+ err ? "query_complete_err" : "query_complete");
+
out_put_tag:
ufshcd_put_dev_cmd_tag(hba, tag);
wake_up(&hba->dev_cmd.tag_wq);
@@ -5443,11 +5479,14 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
spin_unlock_irqrestore(host->host_lock, flags);
+ ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_send");
+
/* wait until the task management command is completed */
err = wait_event_timeout(hba->tm_wq,
test_bit(free_slot, &hba->tm_condition),
msecs_to_jiffies(TM_CMD_TIMEOUT));
if (!err) {
+ ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete_err");
dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
__func__, tm_function);
if (ufshcd_clear_tm_cmd(hba, free_slot))
@@ -5456,6 +5495,7 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
err = -ETIMEDOUT;
} else {
err = ufshcd_task_req_compl(hba, free_slot, tm_response);
+ ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete");
}
clear_bit(free_slot, &hba->tm_condition);
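
Two details worth noting in these hooks: the strings form matched send/complete pairs so a consumer of the ufshcd_upiu tracepoint can bracket each command, and task-management tags are numbered after the hba->nutrs transfer-request tags, hence the subtraction when indexing utmrdl_base_addr. Illustrative pairing (excerpt-style, not complete driver code):

        /* Matched trace strings bracket each query command (sketch). */
        ufshcd_add_query_upiu_trace(hba, tag, "query_send");
        err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
        ufshcd_add_query_upiu_trace(hba, tag,
                        err ? "query_complete_err" : "query_complete");

        /* TM slots live after the nutrs transfer slots: */
        int off = (int)tag - hba->nutrs;        /* index into utmrdl_base_addr */
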
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 0d99b242e82e..6cb933ecc084 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -890,6 +890,7 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
bytes = min(bytes, data_len);
if (!bio) {
+new_bio:
nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages);
nr_pages -= nr_vecs;
/*
@@ -931,6 +932,7 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
* be allocated with pscsi_get_bio() above.
*/
bio = NULL;
+ goto new_bio;
}
data_len -= bytes;
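
Without the backward jump, a scatterlist page that fails to fit once the current bio fills up is silently skipped after bio is reset to NULL; the new label makes the loop retry the same page in a freshly allocated bio. A toy, compilable model of that control flow (all names invented):

        #include <stdio.h>

        #define BATCH_CAP 3     /* stand-in for a bio's page capacity */

        int main(void)
        {
                int batch_used = -1;    /* -1 means no open batch (bio == NULL) */
                int item = 0;

                while (item < 8) {
        new_batch:
                        if (batch_used < 0) {   /* pscsi_get_bio() stand-in */
                                batch_used = 0;
                                printf("open new batch\n");
                        }
                        if (batch_used == BATCH_CAP) {  /* add failed: batch full */
                                printf("submit full batch\n");
                                batch_used = -1;        /* bio = NULL */
                                goto new_batch;         /* retry the same item */
                        }
                        printf("add item %d\n", item);
                        batch_used++;
                        item++;
                }
                return 0;
        }
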
diff --git a/drivers/watchdog/aspeed_wdt.c b/drivers/watchdog/aspeed_wdt.c
index a5b8eb21201f..1abe4d021fd2 100644
--- a/drivers/watchdog/aspeed_wdt.c
+++ b/drivers/watchdog/aspeed_wdt.c
@@ -55,6 +55,8 @@ MODULE_DEVICE_TABLE(of, aspeed_wdt_of_table);
#define WDT_CTRL_WDT_INTR BIT(2)
#define WDT_CTRL_RESET_SYSTEM BIT(1)
#define WDT_CTRL_ENABLE BIT(0)
+#define WDT_TIMEOUT_STATUS 0x10
+#define WDT_TIMEOUT_STATUS_BOOT_SECONDARY BIT(1)
/*
* WDT_RESET_WIDTH controls the characteristics of the external pulse (if
@@ -192,6 +194,7 @@ static int aspeed_wdt_probe(struct platform_device *pdev)
struct device_node *np;
const char *reset_type;
u32 duration;
+ u32 status;
int ret;
wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
@@ -307,6 +310,10 @@ static int aspeed_wdt_probe(struct platform_device *pdev)
writel(duration - 1, wdt->base + WDT_RESET_WIDTH);
}
+ status = readl(wdt->base + WDT_TIMEOUT_STATUS);
+ if (status & WDT_TIMEOUT_STATUS_BOOT_SECONDARY)
+ wdt->wdd.bootstatus = WDIOF_CARDRESET;
+
ret = devm_watchdog_register_device(&pdev->dev, &wdt->wdd);
if (ret) {
dev_err(&pdev->dev, "failed to register\n");
diff --git a/drivers/watchdog/renesas_wdt.c b/drivers/watchdog/renesas_wdt.c
index 6b8c6ddfe30b..514db5cc1595 100644
--- a/drivers/watchdog/renesas_wdt.c
+++ b/drivers/watchdog/renesas_wdt.c
@@ -121,7 +121,8 @@ static int rwdt_restart(struct watchdog_device *wdev, unsigned long action,
}
static const struct watchdog_info rwdt_ident = {
- .options = WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT,
+ .options = WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT |
+ WDIOF_CARDRESET,
.identity = "Renesas WDT Watchdog",
};
@@ -197,9 +198,10 @@ static int rwdt_probe(struct platform_device *pdev)
return PTR_ERR(clk);
pm_runtime_enable(&pdev->dev);
-
pm_runtime_get_sync(&pdev->dev);
priv->clk_rate = clk_get_rate(clk);
+ priv->wdev.bootstatus = (readb_relaxed(priv->base + RWTCSRA) &
+ RWTCSRA_WOVF) ? WDIOF_CARDRESET : 0;
pm_runtime_put(&pdev->dev);
if (!priv->clk_rate) {
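
Both watchdog drivers above now report WDIOF_CARDRESET when the hardware records that the last reset came from the watchdog. Userspace can read this through the standard ioctl; a hedged example (note that opening /dev/watchdog normally arms it, hence the magic-close write before exiting):

        #include <fcntl.h>
        #include <stdio.h>
        #include <unistd.h>
        #include <sys/ioctl.h>
        #include <linux/watchdog.h>

        int main(void)
        {
                int status = 0;
                int fd = open("/dev/watchdog", O_RDWR);

                if (fd < 0)
                        return 1;
                ioctl(fd, WDIOC_GETBOOTSTATUS, &status);
                printf("watchdog-caused reset: %s\n",
                       (status & WDIOF_CARDRESET) ? "yes" : "no");
                write(fd, "V", 1);      /* magic close: disarm before closing */
                close(fd);
                return 0;
        }
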
diff --git a/drivers/watchdog/sch311x_wdt.c b/drivers/watchdog/sch311x_wdt.c
index 43d0cbb7ba0b..814cdf539b0f 100644
--- a/drivers/watchdog/sch311x_wdt.c
+++ b/drivers/watchdog/sch311x_wdt.c
@@ -299,7 +299,7 @@ static long sch311x_wdt_ioctl(struct file *file, unsigned int cmd,
if (sch311x_wdt_set_heartbeat(new_timeout))
return -EINVAL;
sch311x_wdt_keepalive();
- /* Fall */
+ /* Fall through */
case WDIOC_GETTIMEOUT:
return put_user(timeout, p);
default:
diff --git a/drivers/watchdog/w83977f_wdt.c b/drivers/watchdog/w83977f_wdt.c
index 20e2bba10400..672b61a7f9a3 100644
--- a/drivers/watchdog/w83977f_wdt.c
+++ b/drivers/watchdog/w83977f_wdt.c
@@ -427,7 +427,7 @@ static long wdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return -EINVAL;
wdt_keepalive();
- /* Fall */
+ /* Fall through */
case WDIOC_GETTIMEOUT:
return put_user(timeout, uarg.i);
diff --git a/drivers/watchdog/wafer5823wdt.c b/drivers/watchdog/wafer5823wdt.c
index db0da7ea4fd8..93c5b610e264 100644
--- a/drivers/watchdog/wafer5823wdt.c
+++ b/drivers/watchdog/wafer5823wdt.c
@@ -178,7 +178,7 @@ static long wafwdt_ioctl(struct file *file, unsigned int cmd,
timeout = new_timeout;
wafwdt_stop();
wafwdt_start();
- /* Fall */
+ /* Fall through */
case WDIOC_GETTIMEOUT:
return put_user(timeout, p);
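
These comment edits are functional, not cosmetic: at its default level, GCC's -Wimplicit-fallthrough matches the comment text against known patterns, and a bare /* Fall */ does not match, so only the spelled-out form marks the fall-through as intentional. The shape being annotated (helpers hypothetical):

        /* The comment must sit immediately before the next case label. */
        switch (cmd) {
        case WDIOC_SETTIMEOUT:
                if (set_heartbeat(new_timeout)) /* hypothetical helper */
                        return -EINVAL;
                keepalive();                    /* hypothetical helper */
                /* Fall through */
        case WDIOC_GETTIMEOUT:
                return put_user(timeout, p);
        }
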
diff --git a/drivers/xen/xen-pciback/conf_space_quirks.c b/drivers/xen/xen-pciback/conf_space_quirks.c
index 89d9744ece61..ed593d1042a6 100644
--- a/drivers/xen/xen-pciback/conf_space_quirks.c
+++ b/drivers/xen/xen-pciback/conf_space_quirks.c
@@ -95,7 +95,7 @@ int xen_pcibk_config_quirks_init(struct pci_dev *dev)
struct xen_pcibk_config_quirk *quirk;
int ret = 0;
- quirk = kzalloc(sizeof(*quirk), GFP_ATOMIC);
+ quirk = kzalloc(sizeof(*quirk), GFP_KERNEL);
if (!quirk) {
ret = -ENOMEM;
goto out;
diff --git a/drivers/xen/xen-pciback/pci_stub.c b/drivers/xen/xen-pciback/pci_stub.c
index 9e480fdebe1f..59661db144e5 100644
--- a/drivers/xen/xen-pciback/pci_stub.c
+++ b/drivers/xen/xen-pciback/pci_stub.c
@@ -71,7 +71,7 @@ static struct pcistub_device *pcistub_device_alloc(struct pci_dev *dev)
dev_dbg(&dev->dev, "pcistub_device_alloc\n");
- psdev = kzalloc(sizeof(*psdev), GFP_ATOMIC);
+ psdev = kzalloc(sizeof(*psdev), GFP_KERNEL);
if (!psdev)
return NULL;
@@ -364,7 +364,7 @@ static int pcistub_init_device(struct pci_dev *dev)
* here and then to call kfree(pci_get_drvdata(psdev->dev)).
*/
dev_data = kzalloc(sizeof(*dev_data) + strlen(DRV_NAME "[]")
- + strlen(pci_name(dev)) + 1, GFP_ATOMIC);
+ + strlen(pci_name(dev)) + 1, GFP_KERNEL);
if (!dev_data) {
err = -ENOMEM;
goto out;
@@ -577,7 +577,7 @@ static int pcistub_probe(struct pci_dev *dev, const struct pci_device_id *id)
}
if (!match) {
- pci_dev_id = kmalloc(sizeof(*pci_dev_id), GFP_ATOMIC);
+ pci_dev_id = kmalloc(sizeof(*pci_dev_id), GFP_KERNEL);
if (!pci_dev_id) {
err = -ENOMEM;
goto out;
@@ -1149,7 +1149,7 @@ static int pcistub_reg_add(int domain, int bus, int slot, int func,
}
dev = psdev->dev;
- field = kzalloc(sizeof(*field), GFP_ATOMIC);
+ field = kzalloc(sizeof(*field), GFP_KERNEL);
if (!field) {
err = -ENOMEM;
goto out;
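
The common thread in the xen-pciback hunks: every touched call site runs in process context (driver init, probe, sysfs writes), where sleeping is allowed, so GFP_KERNEL is appropriate and lets the allocator reclaim or wait for memory instead of draining the atomic reserves. Rule of thumb, illustratively:

        /* Illustrative only; the right flag follows from the calling context. */
        buf = kzalloc(size, GFP_KERNEL);        /* process context: may sleep */
        buf = kzalloc(size, GFP_ATOMIC);        /* IRQ/spinlock held: must not */
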
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index 0d6d9264d6a9..c3e201025ef0 100644
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -403,7 +403,7 @@ static int xenbus_command_reply(struct xenbus_file_priv *u,
{
struct {
struct xsd_sockmsg hdr;
- const char body[16];
+ char body[16];
} msg;
int rc;
@@ -412,6 +412,7 @@ static int xenbus_command_reply(struct xenbus_file_priv *u,
msg.hdr.len = strlen(reply) + 1;
if (msg.hdr.len > sizeof(msg.body))
return -E2BIG;
+ memcpy(&msg.body, reply, msg.hdr.len);
mutex_lock(&u->reply_mutex);
rc = queue_reply(&u->read_buffers, &msg, sizeof(msg.hdr) + msg.hdr.len);