Diffstat (limited to 'drivers')
-rw-r--r--  drivers/block/aoe/aoeblk.c  16
-rw-r--r--  drivers/block/mtip32xx/mtip32xx.c  1
-rw-r--r--  drivers/block/rsxx/core.c  1
-rw-r--r--  drivers/dma-buf/udmabuf.c  1
-rw-r--r--  drivers/dma/dma-jz4780.c  32
-rw-r--r--  drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c  3
-rw-r--r--  drivers/dma/fsl-qdma.c  4
-rw-r--r--  drivers/dma/mediatek/mtk-cqdma.c  4
-rw-r--r--  drivers/dma/sprd-dma.c  49
-rw-r--r--  drivers/dma/tegra210-adma.c  57
-rw-r--r--  drivers/fpga/dfl-afu-dma-region.c  2
-rw-r--r--  drivers/fpga/dfl.c  22
-rw-r--r--  drivers/fpga/stratix10-soc.c  6
-rw-r--r--  drivers/fpga/zynqmp-fpga.c  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c  12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c  15
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h  1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c  12
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c  3
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c  1
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c  31
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/hwmgr.h  1
-rw-r--r--  drivers/gpu/drm/arm/display/komeda/d71/d71_component.c  8
-rw-r--r--  drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c  4
-rw-r--r--  drivers/gpu/drm/arm/display/komeda/komeda_crtc.c  2
-rw-r--r--  drivers/gpu/drm/arm/display/komeda/komeda_dev.c  6
-rw-r--r--  drivers/gpu/drm/arm/display/komeda/komeda_dev.h  8
-rw-r--r--  drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c  4
-rw-r--r--  drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h  10
-rw-r--r--  drivers/gpu/drm/arm/display/komeda/komeda_plane.c  4
-rw-r--r--  drivers/gpu/drm/arm/hdlcd_crtc.c  14
-rw-r--r--  drivers/gpu/drm/arm/malidp_drv.c  13
-rw-r--r--  drivers/gpu/drm/drm_atomic_helper.c  22
-rw-r--r--  drivers/gpu/drm/i915/gvt/cmd_parser.c  2
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.c  38
-rw-r--r--  drivers/gpu/drm/i915/gvt/handlers.c  49
-rw-r--r--  drivers/gpu/drm/i915/gvt/reg.h  2
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.c  25
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.h  1
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h  3
-rw-r--r--  drivers/gpu/drm/i915/intel_workarounds.c  6
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c  4
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/core/firmware.h  16
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/core/firmware.c  33
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c  4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.c  2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c  56
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.h  22
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.c  50
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.h  3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c  33
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.c  36
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.h  1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r375.c  12
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode.h  12
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c  22
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c  38
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_vop.c  51
-rw-r--r--  drivers/gpu/drm/vc4/vc4_plane.c  2
-rw-r--r--  drivers/hwmon/hwmon.c  2
-rw-r--r--  drivers/hwmon/pmbus/pmbus_core.c  37
-rw-r--r--  drivers/i2c/busses/i2c-xiic.c  5
-rw-r--r--  drivers/infiniband/core/device.c  49
-rw-r--r--  drivers/infiniband/core/rdma_core.h  2
-rw-r--r--  drivers/infiniband/core/uverbs_cmd.c  30
-rw-r--r--  drivers/infiniband/core/uverbs_std_types_cq.c  2
-rw-r--r--  drivers/infiniband/core/uverbs_std_types_mr.c  2
-rw-r--r--  drivers/infiniband/hw/efa/efa_verbs.c  1
-rw-r--r--  drivers/infiniband/hw/hfi1/chip.c  1
-rw-r--r--  drivers/infiniband/hw/hfi1/user_exp_rcv.c  3
-rw-r--r--  drivers/infiniband/hw/hfi1/verbs.c  2
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hw_v1.c  1
-rw-r--r--  drivers/infiniband/hw/mlx5/cmd.c  9
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c  2
-rw-r--r--  drivers/infiniband/hw/qib/qib_verbs.c  2
-rw-r--r--  drivers/infiniband/sw/rdmavt/mr.c  2
-rw-r--r--  drivers/infiniband/sw/rdmavt/qp.c  3
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.c  18
-rw-r--r--  drivers/memstick/core/mspro_block.c  13
-rw-r--r--  drivers/misc/genwqe/card_dev.c  2
-rw-r--r--  drivers/misc/genwqe/card_utils.c  4
-rw-r--r--  drivers/misc/habanalabs/context.c  6
-rw-r--r--  drivers/misc/habanalabs/debugfs.c  65
-rw-r--r--  drivers/misc/habanalabs/device.c  2
-rw-r--r--  drivers/misc/habanalabs/goya/goya.c  3
-rw-r--r--  drivers/misc/habanalabs/goya/goyaP.h  1
-rw-r--r--  drivers/misc/habanalabs/goya/goya_coresight.c  31
-rw-r--r--  drivers/misc/habanalabs/habanalabs.h  2
-rw-r--r--  drivers/misc/habanalabs/memory.c  6
-rw-r--r--  drivers/misc/habanalabs/mmu.c  8
-rw-r--r--  drivers/misc/lkdtm/bugs.c  23
-rw-r--r--  drivers/misc/lkdtm/core.c  6
-rw-r--r--  drivers/misc/lkdtm/lkdtm.h  2
-rw-r--r--  drivers/misc/lkdtm/usercopy.c  10
-rw-r--r--  drivers/mmc/core/queue.c  2
-rw-r--r--  drivers/mmc/host/meson-gx-mmc.c  6
-rw-r--r--  drivers/mmc/host/sdhci-tegra.c  2
-rw-r--r--  drivers/mmc/host/sdhci.c  24
-rw-r--r--  drivers/mmc/host/sdhci_am654.c  2
-rw-r--r--  drivers/mmc/host/tmio_mmc_core.c  3
-rw-r--r--  drivers/net/dsa/mv88e6xxx/chip.c  2
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_main.c  32
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c  14
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c  4
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_ethtool.c  4
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c  4
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_soc.c  15
-rw-r--r--  drivers/net/ethernet/ti/cpsw_ethtool.c  2
-rw-r--r--  drivers/net/ipvlan/ipvlan_main.c  2
-rw-r--r--  drivers/net/phy/phylink.c  13
-rw-r--r--  drivers/net/phy/sfp.c  24
-rw-r--r--  drivers/nvme/host/core.c  3
-rw-r--r--  drivers/nvme/host/pci.c  6
-rw-r--r--  drivers/nvme/host/rdma.c  152
-rw-r--r--  drivers/nvme/host/tcp.c  57
-rw-r--r--  drivers/nvme/target/io-cmd-bdev.c  1
-rw-r--r--  drivers/parisc/ccio-dma.c  6
-rw-r--r--  drivers/parisc/sba_iommu.c  5
-rw-r--r--  drivers/parport/share.c  2
-rw-r--r--  drivers/s390/net/qeth_core_main.c  22
-rw-r--r--  drivers/s390/net/qeth_l2_main.c  2
-rw-r--r--  drivers/s390/net/qeth_l3_main.c  32
-rw-r--r--  drivers/scsi/smartpqi/smartpqi_init.c  6
-rw-r--r--  drivers/scsi/ufs/ufshcd.c  3
-rw-r--r--  drivers/vhost/net.c  41
-rw-r--r--  drivers/vhost/scsi.c  21
-rw-r--r--  drivers/vhost/vhost.c  20
-rw-r--r--  drivers/vhost/vhost.h  5
-rw-r--r--  drivers/vhost/vsock.c  28
-rw-r--r--  drivers/virtio/Kconfig  8
-rw-r--r--  drivers/w1/slaves/w1_ds2408.c  2
132 files changed, 1251 insertions, 583 deletions
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index e2c6aae2d636..bd19f8af950b 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -196,7 +196,6 @@ static const struct file_operations aoe_debugfs_fops = {
static void
aoedisk_add_debugfs(struct aoedev *d)
{
- struct dentry *entry;
char *p;
if (aoe_debugfs_dir == NULL)
@@ -207,15 +206,8 @@ aoedisk_add_debugfs(struct aoedev *d)
else
p++;
BUG_ON(*p == '\0');
- entry = debugfs_create_file(p, 0444, aoe_debugfs_dir, d,
- &aoe_debugfs_fops);
- if (IS_ERR_OR_NULL(entry)) {
- pr_info("aoe: cannot create debugfs file for %s\n",
- d->gd->disk_name);
- return;
- }
- BUG_ON(d->debugfs);
- d->debugfs = entry;
+ d->debugfs = debugfs_create_file(p, 0444, aoe_debugfs_dir, d,
+ &aoe_debugfs_fops);
}
void
aoedisk_rm_debugfs(struct aoedev *d)
@@ -472,10 +464,6 @@ aoeblk_init(void)
if (buf_pool_cache == NULL)
return -ENOMEM;
aoe_debugfs_dir = debugfs_create_dir("aoe", NULL);
- if (IS_ERR_OR_NULL(aoe_debugfs_dir)) {
- pr_info("aoe: cannot create debugfs directory\n");
- aoe_debugfs_dir = NULL;
- }
return 0;
}
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index bacfdac7161c..a14b09ab3a41 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -3676,6 +3676,7 @@ skip_create_disk:
blk_queue_physical_block_size(dd->queue, 4096);
blk_queue_max_hw_sectors(dd->queue, 0xffff);
blk_queue_max_segment_size(dd->queue, 0x400000);
+ dma_set_max_seg_size(&dd->pdev->dev, 0x400000);
blk_queue_io_min(dd->queue, 4096);
/* Set the capacity of the device in 512 byte sectors. */
diff --git a/drivers/block/rsxx/core.c b/drivers/block/rsxx/core.c
index de9b2d2f8654..76b73ddf8fd7 100644
--- a/drivers/block/rsxx/core.c
+++ b/drivers/block/rsxx/core.c
@@ -767,7 +767,6 @@ static int rsxx_pci_probe(struct pci_dev *dev,
goto failed_enable;
pci_set_master(dev);
- dma_set_max_seg_size(&dev->dev, RSXX_HW_BLK_SIZE);
st = dma_set_mask(&dev->dev, DMA_BIT_MASK(64));
if (st) {
diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
index cd57747286f2..9635897458a0 100644
--- a/drivers/dma-buf/udmabuf.c
+++ b/drivers/dma-buf/udmabuf.c
@@ -77,6 +77,7 @@ static void unmap_udmabuf(struct dma_buf_attachment *at,
struct sg_table *sg,
enum dma_data_direction direction)
{
+ dma_unmap_sg(at->dev, sg->sgl, sg->nents, direction);
sg_free_table(sg);
kfree(sg);
}
diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c
index 7204fdeff6c5..263bee76ef0d 100644
--- a/drivers/dma/dma-jz4780.c
+++ b/drivers/dma/dma-jz4780.c
@@ -662,10 +662,11 @@ static enum dma_status jz4780_dma_tx_status(struct dma_chan *chan,
return status;
}
-static void jz4780_dma_chan_irq(struct jz4780_dma_dev *jzdma,
- struct jz4780_dma_chan *jzchan)
+static bool jz4780_dma_chan_irq(struct jz4780_dma_dev *jzdma,
+ struct jz4780_dma_chan *jzchan)
{
uint32_t dcs;
+ bool ack = true;
spin_lock(&jzchan->vchan.lock);
@@ -688,12 +689,20 @@ static void jz4780_dma_chan_irq(struct jz4780_dma_dev *jzdma,
if ((dcs & (JZ_DMA_DCS_AR | JZ_DMA_DCS_HLT)) == 0) {
if (jzchan->desc->type == DMA_CYCLIC) {
vchan_cyclic_callback(&jzchan->desc->vdesc);
- } else {
+
+ jz4780_dma_begin(jzchan);
+ } else if (dcs & JZ_DMA_DCS_TT) {
vchan_cookie_complete(&jzchan->desc->vdesc);
jzchan->desc = NULL;
- }
- jz4780_dma_begin(jzchan);
+ jz4780_dma_begin(jzchan);
+ } else {
+ /* False positive - continue the transfer */
+ ack = false;
+ jz4780_dma_chn_writel(jzdma, jzchan->id,
+ JZ_DMA_REG_DCS,
+ JZ_DMA_DCS_CTE);
+ }
}
} else {
dev_err(&jzchan->vchan.chan.dev->device,
@@ -701,21 +710,22 @@ static void jz4780_dma_chan_irq(struct jz4780_dma_dev *jzdma,
}
spin_unlock(&jzchan->vchan.lock);
+
+ return ack;
}
static irqreturn_t jz4780_dma_irq_handler(int irq, void *data)
{
struct jz4780_dma_dev *jzdma = data;
+ unsigned int nb_channels = jzdma->soc_data->nb_channels;
uint32_t pending, dmac;
int i;
pending = jz4780_dma_ctrl_readl(jzdma, JZ_DMA_REG_DIRQP);
- for (i = 0; i < jzdma->soc_data->nb_channels; i++) {
- if (!(pending & (1<<i)))
- continue;
-
- jz4780_dma_chan_irq(jzdma, &jzdma->chan[i]);
+ for_each_set_bit(i, (unsigned long *)&pending, nb_channels) {
+ if (jz4780_dma_chan_irq(jzdma, &jzdma->chan[i]))
+ pending &= ~BIT(i);
}
/* Clear halt and address error status of all channels. */
@@ -724,7 +734,7 @@ static irqreturn_t jz4780_dma_irq_handler(int irq, void *data)
jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DMAC, dmac);
/* Clear interrupt pending status. */
- jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DIRQP, 0);
+ jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DIRQP, pending);
return IRQ_HANDLED;
}
diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
index b2ac1d2c5b86..a1ce307c502f 100644
--- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
+++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
@@ -512,7 +512,8 @@ dma_chan_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst_adr,
return vchan_tx_prep(&chan->vc, &first->vd, flags);
err_desc_get:
- axi_desc_put(first);
+ if (first)
+ axi_desc_put(first);
return NULL;
}
diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c
index aa1d0ae3d207..60b062c3647b 100644
--- a/drivers/dma/fsl-qdma.c
+++ b/drivers/dma/fsl-qdma.c
@@ -701,10 +701,8 @@ static irqreturn_t fsl_qdma_error_handler(int irq, void *dev_id)
intr = qdma_readl(fsl_qdma, status + FSL_QDMA_DEDR);
- if (intr) {
+ if (intr)
dev_err(fsl_qdma->dma_dev.dev, "DMA transaction error!\n");
- return IRQ_NONE;
- }
qdma_writel(fsl_qdma, FSL_QDMA_DEDR_CLEAR, status + FSL_QDMA_DEDR);
return IRQ_HANDLED;
diff --git a/drivers/dma/mediatek/mtk-cqdma.c b/drivers/dma/mediatek/mtk-cqdma.c
index 814853842e29..723b11c190b3 100644
--- a/drivers/dma/mediatek/mtk-cqdma.c
+++ b/drivers/dma/mediatek/mtk-cqdma.c
@@ -225,7 +225,7 @@ static int mtk_cqdma_hard_reset(struct mtk_cqdma_pchan *pc)
mtk_dma_set(pc, MTK_CQDMA_RESET, MTK_CQDMA_HARD_RST_BIT);
mtk_dma_clr(pc, MTK_CQDMA_RESET, MTK_CQDMA_HARD_RST_BIT);
- return mtk_cqdma_poll_engine_done(pc, false);
+ return mtk_cqdma_poll_engine_done(pc, true);
}
static void mtk_cqdma_start(struct mtk_cqdma_pchan *pc,
@@ -671,7 +671,7 @@ static void mtk_cqdma_free_chan_resources(struct dma_chan *c)
mtk_dma_set(cvc->pc, MTK_CQDMA_FLUSH, MTK_CQDMA_FLUSH_BIT);
/* wait for the completion of flush operation */
- if (mtk_cqdma_poll_engine_done(cvc->pc, false) < 0)
+ if (mtk_cqdma_poll_engine_done(cvc->pc, true) < 0)
dev_err(cqdma2dev(to_cqdma_dev(c)), "cqdma flush timeout\n");
/* clear the flush bit and interrupt flag */
diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c
index 48431e2da987..baac476c8622 100644
--- a/drivers/dma/sprd-dma.c
+++ b/drivers/dma/sprd-dma.c
@@ -62,6 +62,8 @@
/* SPRD_DMA_GLB_2STAGE_GRP register definition */
#define SPRD_DMA_GLB_2STAGE_EN BIT(24)
#define SPRD_DMA_GLB_CHN_INT_MASK GENMASK(23, 20)
+#define SPRD_DMA_GLB_DEST_INT BIT(22)
+#define SPRD_DMA_GLB_SRC_INT BIT(20)
#define SPRD_DMA_GLB_LIST_DONE_TRG BIT(19)
#define SPRD_DMA_GLB_TRANS_DONE_TRG BIT(18)
#define SPRD_DMA_GLB_BLOCK_DONE_TRG BIT(17)
@@ -135,6 +137,7 @@
/* define DMA channel mode & trigger mode mask */
#define SPRD_DMA_CHN_MODE_MASK GENMASK(7, 0)
#define SPRD_DMA_TRG_MODE_MASK GENMASK(7, 0)
+#define SPRD_DMA_INT_TYPE_MASK GENMASK(7, 0)
/* define the DMA transfer step type */
#define SPRD_DMA_NONE_STEP 0
@@ -190,6 +193,7 @@ struct sprd_dma_chn {
u32 dev_id;
enum sprd_dma_chn_mode chn_mode;
enum sprd_dma_trg_mode trg_mode;
+ enum sprd_dma_int_type int_type;
struct sprd_dma_desc *cur_desc;
};
@@ -429,6 +433,9 @@ static int sprd_dma_set_2stage_config(struct sprd_dma_chn *schan)
val = chn & SPRD_DMA_GLB_SRC_CHN_MASK;
val |= BIT(schan->trg_mode - 1) << SPRD_DMA_GLB_TRG_OFFSET;
val |= SPRD_DMA_GLB_2STAGE_EN;
+ if (schan->int_type != SPRD_DMA_NO_INT)
+ val |= SPRD_DMA_GLB_SRC_INT;
+
sprd_dma_glb_update(sdev, SPRD_DMA_GLB_2STAGE_GRP1, val, val);
break;
@@ -436,6 +443,9 @@ static int sprd_dma_set_2stage_config(struct sprd_dma_chn *schan)
val = chn & SPRD_DMA_GLB_SRC_CHN_MASK;
val |= BIT(schan->trg_mode - 1) << SPRD_DMA_GLB_TRG_OFFSET;
val |= SPRD_DMA_GLB_2STAGE_EN;
+ if (schan->int_type != SPRD_DMA_NO_INT)
+ val |= SPRD_DMA_GLB_SRC_INT;
+
sprd_dma_glb_update(sdev, SPRD_DMA_GLB_2STAGE_GRP2, val, val);
break;
@@ -443,6 +453,9 @@ static int sprd_dma_set_2stage_config(struct sprd_dma_chn *schan)
val = (chn << SPRD_DMA_GLB_DEST_CHN_OFFSET) &
SPRD_DMA_GLB_DEST_CHN_MASK;
val |= SPRD_DMA_GLB_2STAGE_EN;
+ if (schan->int_type != SPRD_DMA_NO_INT)
+ val |= SPRD_DMA_GLB_DEST_INT;
+
sprd_dma_glb_update(sdev, SPRD_DMA_GLB_2STAGE_GRP1, val, val);
break;
@@ -450,6 +463,9 @@ static int sprd_dma_set_2stage_config(struct sprd_dma_chn *schan)
val = (chn << SPRD_DMA_GLB_DEST_CHN_OFFSET) &
SPRD_DMA_GLB_DEST_CHN_MASK;
val |= SPRD_DMA_GLB_2STAGE_EN;
+ if (schan->int_type != SPRD_DMA_NO_INT)
+ val |= SPRD_DMA_GLB_DEST_INT;
+
sprd_dma_glb_update(sdev, SPRD_DMA_GLB_2STAGE_GRP2, val, val);
break;
@@ -510,7 +526,9 @@ static void sprd_dma_start(struct sprd_dma_chn *schan)
sprd_dma_set_uid(schan);
sprd_dma_enable_chn(schan);
- if (schan->dev_id == SPRD_DMA_SOFTWARE_UID)
+ if (schan->dev_id == SPRD_DMA_SOFTWARE_UID &&
+ schan->chn_mode != SPRD_DMA_DST_CHN0 &&
+ schan->chn_mode != SPRD_DMA_DST_CHN1)
sprd_dma_soft_request(schan);
}
@@ -552,12 +570,17 @@ static irqreturn_t dma_irq_handle(int irq, void *dev_id)
schan = &sdev->channels[i];
spin_lock(&schan->vc.lock);
+
+ sdesc = schan->cur_desc;
+ if (!sdesc) {
+ spin_unlock(&schan->vc.lock);
+ return IRQ_HANDLED;
+ }
+
int_type = sprd_dma_get_int_type(schan);
req_type = sprd_dma_get_req_type(schan);
sprd_dma_clear_int(schan);
- sdesc = schan->cur_desc;
-
/* cyclic mode schedule callback */
cyclic = schan->linklist.phy_addr ? true : false;
if (cyclic == true) {
@@ -625,7 +648,7 @@ static enum dma_status sprd_dma_tx_status(struct dma_chan *chan,
else
pos = 0;
} else if (schan->cur_desc && schan->cur_desc->vd.tx.cookie == cookie) {
- struct sprd_dma_desc *sdesc = to_sprd_dma_desc(vd);
+ struct sprd_dma_desc *sdesc = schan->cur_desc;
if (sdesc->dir == DMA_DEV_TO_MEM)
pos = sprd_dma_get_dst_addr(schan);
@@ -771,7 +794,7 @@ static int sprd_dma_fill_desc(struct dma_chan *chan,
temp |= slave_cfg->src_maxburst & SPRD_DMA_FRG_LEN_MASK;
hw->frg_len = temp;
- hw->blk_len = len & SPRD_DMA_BLK_LEN_MASK;
+ hw->blk_len = slave_cfg->src_maxburst & SPRD_DMA_BLK_LEN_MASK;
hw->trsc_len = len & SPRD_DMA_TRSC_LEN_MASK;
temp = (dst_step & SPRD_DMA_TRSF_STEP_MASK) << SPRD_DMA_DEST_TRSF_STEP_OFFSET;
@@ -904,6 +927,16 @@ sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
schan->linklist.virt_addr = 0;
}
+ /*
+ * Set channel mode, interrupt mode and trigger mode for 2-stage
+ * transfer.
+ */
+ schan->chn_mode =
+ (flags >> SPRD_DMA_CHN_MODE_SHIFT) & SPRD_DMA_CHN_MODE_MASK;
+ schan->trg_mode =
+ (flags >> SPRD_DMA_TRG_MODE_SHIFT) & SPRD_DMA_TRG_MODE_MASK;
+ schan->int_type = flags & SPRD_DMA_INT_TYPE_MASK;
+
sdesc = kzalloc(sizeof(*sdesc), GFP_NOWAIT);
if (!sdesc)
return NULL;
@@ -937,12 +970,6 @@ sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
}
}
- /* Set channel mode and trigger mode for 2-stage transfer */
- schan->chn_mode =
- (flags >> SPRD_DMA_CHN_MODE_SHIFT) & SPRD_DMA_CHN_MODE_MASK;
- schan->trg_mode =
- (flags >> SPRD_DMA_TRG_MODE_SHIFT) & SPRD_DMA_TRG_MODE_MASK;
-
ret = sprd_dma_fill_desc(chan, &sdesc->chn_hw, 0, 0, src, dst, len,
dir, flags, slave_cfg);
if (ret) {
diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c
index d51550dd91c7..2805853e963f 100644
--- a/drivers/dma/tegra210-adma.c
+++ b/drivers/dma/tegra210-adma.c
@@ -42,10 +42,14 @@
#define ADMA_CH_CONFIG_MAX_BUFS 8
#define ADMA_CH_FIFO_CTRL 0x2c
-#define ADMA_CH_FIFO_CTRL_OVRFW_THRES(val) (((val) & 0xf) << 24)
-#define ADMA_CH_FIFO_CTRL_STARV_THRES(val) (((val) & 0xf) << 16)
-#define ADMA_CH_FIFO_CTRL_TX_FIFO_SIZE_SHIFT 8
-#define ADMA_CH_FIFO_CTRL_RX_FIFO_SIZE_SHIFT 0
+#define TEGRA210_ADMA_CH_FIFO_CTRL_OFLWTHRES(val) (((val) & 0xf) << 24)
+#define TEGRA210_ADMA_CH_FIFO_CTRL_STRVTHRES(val) (((val) & 0xf) << 16)
+#define TEGRA210_ADMA_CH_FIFO_CTRL_TXSIZE(val) (((val) & 0xf) << 8)
+#define TEGRA210_ADMA_CH_FIFO_CTRL_RXSIZE(val) ((val) & 0xf)
+#define TEGRA186_ADMA_CH_FIFO_CTRL_OFLWTHRES(val) (((val) & 0x1f) << 24)
+#define TEGRA186_ADMA_CH_FIFO_CTRL_STRVTHRES(val) (((val) & 0x1f) << 16)
+#define TEGRA186_ADMA_CH_FIFO_CTRL_TXSIZE(val) (((val) & 0x1f) << 8)
+#define TEGRA186_ADMA_CH_FIFO_CTRL_RXSIZE(val) ((val) & 0x1f)
#define ADMA_CH_LOWER_SRC_ADDR 0x34
#define ADMA_CH_LOWER_TRG_ADDR 0x3c
@@ -60,8 +64,15 @@
#define TEGRA_ADMA_BURST_COMPLETE_TIME 20
-#define ADMA_CH_FIFO_CTRL_DEFAULT (ADMA_CH_FIFO_CTRL_OVRFW_THRES(1) | \
- ADMA_CH_FIFO_CTRL_STARV_THRES(1))
+#define TEGRA210_FIFO_CTRL_DEFAULT (TEGRA210_ADMA_CH_FIFO_CTRL_OFLWTHRES(1) | \
+ TEGRA210_ADMA_CH_FIFO_CTRL_STRVTHRES(1) | \
+ TEGRA210_ADMA_CH_FIFO_CTRL_TXSIZE(3) | \
+ TEGRA210_ADMA_CH_FIFO_CTRL_RXSIZE(3))
+
+#define TEGRA186_FIFO_CTRL_DEFAULT (TEGRA186_ADMA_CH_FIFO_CTRL_OFLWTHRES(1) | \
+ TEGRA186_ADMA_CH_FIFO_CTRL_STRVTHRES(1) | \
+ TEGRA186_ADMA_CH_FIFO_CTRL_TXSIZE(3) | \
+ TEGRA186_ADMA_CH_FIFO_CTRL_RXSIZE(3))
#define ADMA_CH_REG_FIELD_VAL(val, mask, shift) (((val) & mask) << shift)
@@ -73,7 +84,8 @@ struct tegra_adma;
* @global_int_clear: Register offset of DMA global interrupt clear.
* @ch_req_tx_shift: Register offset for AHUB transmit channel select.
* @ch_req_rx_shift: Register offset for AHUB receive channel select.
- * @ch_base_offset: Reister offset of DMA channel registers.
+ * @ch_base_offset: Register offset of DMA channel registers.
+ * @ch_fifo_ctrl: Default value for channel FIFO CTRL register.
* @ch_req_mask: Mask for Tx or Rx channel select.
* @ch_req_max: Maximum number of Tx or Rx channels available.
* @ch_reg_size: Size of DMA channel register space.
@@ -86,6 +98,7 @@ struct tegra_adma_chip_data {
unsigned int ch_req_tx_shift;
unsigned int ch_req_rx_shift;
unsigned int ch_base_offset;
+ unsigned int ch_fifo_ctrl;
unsigned int ch_req_mask;
unsigned int ch_req_max;
unsigned int ch_reg_size;
@@ -589,7 +602,7 @@ static int tegra_adma_set_xfer_params(struct tegra_adma_chan *tdc,
ADMA_CH_CTRL_FLOWCTRL_EN;
ch_regs->config |= cdata->adma_get_burst_config(burst_size);
ch_regs->config |= ADMA_CH_CONFIG_WEIGHT_FOR_WRR(1);
- ch_regs->fifo_ctrl = ADMA_CH_FIFO_CTRL_DEFAULT;
+ ch_regs->fifo_ctrl = cdata->ch_fifo_ctrl;
ch_regs->tc = desc->period_len & ADMA_CH_TC_COUNT_MASK;
return tegra_adma_request_alloc(tdc, direction);
@@ -773,6 +786,7 @@ static const struct tegra_adma_chip_data tegra210_chip_data = {
.ch_req_tx_shift = 28,
.ch_req_rx_shift = 24,
.ch_base_offset = 0,
+ .ch_fifo_ctrl = TEGRA210_FIFO_CTRL_DEFAULT,
.ch_req_mask = 0xf,
.ch_req_max = 10,
.ch_reg_size = 0x80,
@@ -786,6 +800,7 @@ static const struct tegra_adma_chip_data tegra186_chip_data = {
.ch_req_tx_shift = 27,
.ch_req_rx_shift = 22,
.ch_base_offset = 0x10000,
+ .ch_fifo_ctrl = TEGRA186_FIFO_CTRL_DEFAULT,
.ch_req_mask = 0x1f,
.ch_req_max = 20,
.ch_reg_size = 0x100,
@@ -834,16 +849,6 @@ static int tegra_adma_probe(struct platform_device *pdev)
return PTR_ERR(tdma->ahub_clk);
}
- pm_runtime_enable(&pdev->dev);
-
- ret = pm_runtime_get_sync(&pdev->dev);
- if (ret < 0)
- goto rpm_disable;
-
- ret = tegra_adma_init(tdma);
- if (ret)
- goto rpm_put;
-
INIT_LIST_HEAD(&tdma->dma_dev.channels);
for (i = 0; i < tdma->nr_channels; i++) {
struct tegra_adma_chan *tdc = &tdma->channels[i];
@@ -862,6 +867,16 @@ static int tegra_adma_probe(struct platform_device *pdev)
tdc->tdma = tdma;
}
+ pm_runtime_enable(&pdev->dev);
+
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0)
+ goto rpm_disable;
+
+ ret = tegra_adma_init(tdma);
+ if (ret)
+ goto rpm_put;
+
dma_cap_set(DMA_SLAVE, tdma->dma_dev.cap_mask);
dma_cap_set(DMA_PRIVATE, tdma->dma_dev.cap_mask);
dma_cap_set(DMA_CYCLIC, tdma->dma_dev.cap_mask);
@@ -905,13 +920,13 @@ static int tegra_adma_probe(struct platform_device *pdev)
dma_remove:
dma_async_device_unregister(&tdma->dma_dev);
-irq_dispose:
- while (--i >= 0)
- irq_dispose_mapping(tdma->channels[i].irq);
rpm_put:
pm_runtime_put_sync(&pdev->dev);
rpm_disable:
pm_runtime_disable(&pdev->dev);
+irq_dispose:
+ while (--i >= 0)
+ irq_dispose_mapping(tdma->channels[i].irq);
return ret;
}
diff --git a/drivers/fpga/dfl-afu-dma-region.c b/drivers/fpga/dfl-afu-dma-region.c
index c438722bf4e1..dcd80b088c7b 100644
--- a/drivers/fpga/dfl-afu-dma-region.c
+++ b/drivers/fpga/dfl-afu-dma-region.c
@@ -399,7 +399,7 @@ int afu_dma_map_region(struct dfl_feature_platform_data *pdata,
region->pages[0], 0,
region->length,
DMA_BIDIRECTIONAL);
- if (dma_mapping_error(&pdata->dev->dev, region->iova)) {
+ if (dma_mapping_error(dfl_fpga_pdata_to_parent(pdata), region->iova)) {
dev_err(&pdata->dev->dev, "failed to map for dma\n");
ret = -EFAULT;
goto unpin_pages;
diff --git a/drivers/fpga/dfl.c b/drivers/fpga/dfl.c
index 2c09e502e721..4b66aaa32b5a 100644
--- a/drivers/fpga/dfl.c
+++ b/drivers/fpga/dfl.c
@@ -40,6 +40,13 @@ enum dfl_fpga_devt_type {
DFL_FPGA_DEVT_MAX,
};
+static struct lock_class_key dfl_pdata_keys[DFL_ID_MAX];
+
+static const char *dfl_pdata_key_strings[DFL_ID_MAX] = {
+ "dfl-fme-pdata",
+ "dfl-port-pdata",
+};
+
/**
* dfl_dev_info - dfl feature device information.
* @name: name string of the feature platform device.
@@ -315,7 +322,7 @@ static void dfl_chardev_uinit(void)
for (i = 0; i < DFL_FPGA_DEVT_MAX; i++)
if (MAJOR(dfl_chrdevs[i].devt)) {
unregister_chrdev_region(dfl_chrdevs[i].devt,
- MINORMASK);
+ MINORMASK + 1);
dfl_chrdevs[i].devt = MKDEV(0, 0);
}
}
@@ -325,8 +332,8 @@ static int dfl_chardev_init(void)
int i, ret;
for (i = 0; i < DFL_FPGA_DEVT_MAX; i++) {
- ret = alloc_chrdev_region(&dfl_chrdevs[i].devt, 0, MINORMASK,
- dfl_chrdevs[i].name);
+ ret = alloc_chrdev_region(&dfl_chrdevs[i].devt, 0,
+ MINORMASK + 1, dfl_chrdevs[i].name);
if (ret)
goto exit;
}
@@ -443,11 +450,16 @@ static int build_info_commit_dev(struct build_feature_devs_info *binfo)
struct platform_device *fdev = binfo->feature_dev;
struct dfl_feature_platform_data *pdata;
struct dfl_feature_info *finfo, *p;
+ enum dfl_id_type type;
int ret, index = 0;
if (!fdev)
return 0;
+ type = feature_dev_id_type(fdev);
+ if (WARN_ON_ONCE(type >= DFL_ID_MAX))
+ return -EINVAL;
+
/*
* we do not need to care for the memory which is associated with
* the platform device. After calling platform_device_unregister(),
@@ -463,6 +475,8 @@ static int build_info_commit_dev(struct build_feature_devs_info *binfo)
pdata->num = binfo->feature_num;
pdata->dfl_cdev = binfo->cdev;
mutex_init(&pdata->lock);
+ lockdep_set_class_and_name(&pdata->lock, &dfl_pdata_keys[type],
+ dfl_pdata_key_strings[type]);
/*
* the count should be initialized to 0 to make sure
@@ -497,7 +511,7 @@ static int build_info_commit_dev(struct build_feature_devs_info *binfo)
ret = platform_device_add(binfo->feature_dev);
if (!ret) {
- if (feature_dev_id_type(binfo->feature_dev) == PORT_ID)
+ if (type == PORT_ID)
dfl_fpga_cdev_add_port_dev(binfo->cdev,
binfo->feature_dev);
else
diff --git a/drivers/fpga/stratix10-soc.c b/drivers/fpga/stratix10-soc.c
index 13851b3d1c56..215d33789c74 100644
--- a/drivers/fpga/stratix10-soc.c
+++ b/drivers/fpga/stratix10-soc.c
@@ -507,12 +507,16 @@ static int __init s10_init(void)
if (!fw_np)
return -ENODEV;
+ of_node_get(fw_np);
np = of_find_matching_node(fw_np, s10_of_match);
- if (!np)
+ if (!np) {
+ of_node_put(fw_np);
return -ENODEV;
+ }
of_node_put(np);
ret = of_platform_populate(fw_np, s10_of_match, NULL, NULL);
+ of_node_put(fw_np);
if (ret)
return ret;
diff --git a/drivers/fpga/zynqmp-fpga.c b/drivers/fpga/zynqmp-fpga.c
index f7cbaadf49ab..b8a88d21d038 100644
--- a/drivers/fpga/zynqmp-fpga.c
+++ b/drivers/fpga/zynqmp-fpga.c
@@ -47,7 +47,7 @@ static int zynqmp_fpga_ops_write(struct fpga_manager *mgr,
char *kbuf;
int ret;
- if (!eemi_ops || !eemi_ops->fpga_load)
+ if (IS_ERR_OR_NULL(eemi_ops) || !eemi_ops->fpga_load)
return -ENXIO;
priv = mgr->priv;
@@ -81,7 +81,7 @@ static enum fpga_mgr_states zynqmp_fpga_ops_state(struct fpga_manager *mgr)
const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
u32 status;
- if (!eemi_ops || !eemi_ops->fpga_get_status)
+ if (IS_ERR_OR_NULL(eemi_ops) || !eemi_ops->fpga_get_status)
return FPGA_MGR_STATE_UNKNOWN;
eemi_ops->fpga_get_status(&status);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index cc8ad3831982..f4ac632a87b2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1589,6 +1589,7 @@ static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
{
int r = 0;
int i;
+ uint32_t smu_version;
if (adev->asic_type >= CHIP_VEGA10) {
for (i = 0; i < adev->num_ip_blocks; i++) {
@@ -1614,16 +1615,9 @@ static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
}
}
}
+ r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
- if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->load_firmware) {
- r = adev->powerplay.pp_funcs->load_firmware(adev->powerplay.pp_handle);
- if (r) {
- pr_err("firmware loading failed\n");
- return r;
- }
- }
-
- return 0;
+ return r;
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 34471dbaa872..039cfa2ec89d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -2490,6 +2490,21 @@ void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
}
+int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
+{
+ int r = -EINVAL;
+
+ if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->load_firmware) {
+ r = adev->powerplay.pp_funcs->load_firmware(adev->powerplay.pp_handle);
+ if (r) {
+ pr_err("smu firmware loading failed\n");
+ return r;
+ }
+ *smu_version = adev->pm.fw_version;
+ }
+ return r;
+}
+
int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
{
struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h
index f21a7716b90e..7ff0e7621fff 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h
@@ -34,6 +34,7 @@ void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev);
int amdgpu_pm_sysfs_init(struct amdgpu_device *adev);
void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev);
void amdgpu_pm_print_power_states(struct amdgpu_device *adev);
+int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version);
void amdgpu_pm_compute_clocks(struct amdgpu_device *adev);
void amdgpu_dpm_thermal_work_handler(struct work_struct *work);
void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index c021b114c8a4..f7189e22f6b7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -1072,7 +1072,7 @@ void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- uint32_t rptr = amdgpu_ring_get_rptr(ring);
+ uint32_t rptr;
unsigned i;
int r, timeout = adev->usec_timeout;
@@ -1084,6 +1084,8 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
if (r)
return r;
+ rptr = amdgpu_ring_get_rptr(ring);
+
amdgpu_ring_write(ring, VCE_CMD_END);
amdgpu_ring_commit(ring);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index ba67d1023264..b610e3b30d95 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -28,6 +28,7 @@
#include "soc15.h"
#include "soc15d.h"
#include "amdgpu_atomfirmware.h"
+#include "amdgpu_pm.h"
#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
@@ -96,6 +97,7 @@ MODULE_FIRMWARE("amdgpu/raven2_me.bin");
MODULE_FIRMWARE("amdgpu/raven2_mec.bin");
MODULE_FIRMWARE("amdgpu/raven2_mec2.bin");
MODULE_FIRMWARE("amdgpu/raven2_rlc.bin");
+MODULE_FIRMWARE("amdgpu/raven_kicker_rlc.bin");
static const struct soc15_reg_golden golden_settings_gc_9_0[] =
{
@@ -588,7 +590,8 @@ static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
case CHIP_RAVEN:
if (adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8)
break;
- if ((adev->gfx.rlc_fw_version < 531) ||
+ if ((adev->gfx.rlc_fw_version != 106 &&
+ adev->gfx.rlc_fw_version < 531) ||
(adev->gfx.rlc_fw_version == 53815) ||
(adev->gfx.rlc_feature_version < 1) ||
!adev->gfx.rlc.is_rlc_v2_1)
@@ -612,6 +615,7 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
unsigned int i = 0;
uint16_t version_major;
uint16_t version_minor;
+ uint32_t smu_version;
DRM_DEBUG("\n");
@@ -682,6 +686,12 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
(((adev->pdev->revision >= 0xC8) && (adev->pdev->revision <= 0xCF)) ||
((adev->pdev->revision >= 0xD8) && (adev->pdev->revision <= 0xDF))))
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc_am4.bin", chip_name);
+ else if (!strcmp(chip_name, "raven") && (amdgpu_pm_load_smu_firmware(adev, &smu_version) == 0) &&
+ (smu_version >= 0x41e2b))
+ /**
+ *SMC is loaded by SBIOS on APU and it's able to get the SMU version directly.
+ */
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_kicker_rlc.bin", chip_name);
else
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index bcb1a93c0b4c..ab7c5c3004ee 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -4232,8 +4232,7 @@ static void dm_plane_atomic_async_update(struct drm_plane *plane,
struct drm_plane_state *old_state =
drm_atomic_get_old_plane_state(new_state->state, plane);
- if (plane->state->fb != new_state->fb)
- drm_atomic_set_fb_for_plane(plane->state, new_state->fb);
+ swap(plane->state->fb, new_state->fb);
plane->state->src_x = new_state->src_x;
plane->state->src_y = new_state->src_y;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index 6cd6497c6fc2..f1d326caf69e 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -92,6 +92,7 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr)
hwmgr_set_user_specify_caps(hwmgr);
hwmgr->fan_ctrl_is_in_default_mode = true;
hwmgr_init_workload_prority(hwmgr);
+ hwmgr->gfxoff_state_changed_by_workload = false;
switch (hwmgr->chip_family) {
case AMDGPU_FAMILY_CI:
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
index 9a595f7525e6..e32ae9d3373c 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
@@ -1258,21 +1258,46 @@ static int smu10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
return size;
}
+static bool smu10_is_raven1_refresh(struct pp_hwmgr *hwmgr)
+{
+ struct amdgpu_device *adev = hwmgr->adev;
+ if ((adev->asic_type == CHIP_RAVEN) &&
+ (adev->rev_id != 0x15d8) &&
+ (hwmgr->smu_version >= 0x41e2b))
+ return true;
+ else
+ return false;
+}
+
static int smu10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint32_t size)
{
int workload_type = 0;
+ int result = 0;
if (input[size] > PP_SMC_POWER_PROFILE_COMPUTE) {
pr_err("Invalid power profile mode %ld\n", input[size]);
return -EINVAL;
}
- hwmgr->power_profile_mode = input[size];
+ if (hwmgr->power_profile_mode == input[size])
+ return 0;
/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
workload_type =
- conv_power_profile_to_pplib_workload(hwmgr->power_profile_mode);
- smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ActiveProcessNotify,
+ conv_power_profile_to_pplib_workload(input[size]);
+ if (workload_type &&
+ smu10_is_raven1_refresh(hwmgr) &&
+ !hwmgr->gfxoff_state_changed_by_workload) {
+ smu10_gfx_off_control(hwmgr, false);
+ hwmgr->gfxoff_state_changed_by_workload = true;
+ }
+ result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ActiveProcessNotify,
1 << workload_type);
+ if (!result)
+ hwmgr->power_profile_mode = input[size];
+ if (workload_type && hwmgr->gfxoff_state_changed_by_workload) {
+ smu10_gfx_off_control(hwmgr, true);
+ hwmgr->gfxoff_state_changed_by_workload = false;
+ }
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index bac3d85e3b82..c92999aac07c 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -782,6 +782,7 @@ struct pp_hwmgr {
uint32_t workload_mask;
uint32_t workload_prority[Workload_Policy_Max];
uint32_t workload_setting[Workload_Policy_Max];
+ bool gfxoff_state_changed_by_workload;
};
int hwmgr_early_init(struct pp_hwmgr *hwmgr);
diff --git a/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c b/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c
index 031e5f305a3c..6bab816ed8e7 100644
--- a/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c
+++ b/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c
@@ -245,7 +245,7 @@ static void d71_layer_dump(struct komeda_component *c, struct seq_file *sf)
seq_printf(sf, "%sAD_V_CROP:\t\t0x%X\n", prefix, v[2]);
}
-static struct komeda_component_funcs d71_layer_funcs = {
+static const struct komeda_component_funcs d71_layer_funcs = {
.update = d71_layer_update,
.disable = d71_layer_disable,
.dump_register = d71_layer_dump,
@@ -391,7 +391,7 @@ static void d71_compiz_dump(struct komeda_component *c, struct seq_file *sf)
seq_printf(sf, "CU_USER_HIGH:\t\t0x%X\n", v[1]);
}
-static struct komeda_component_funcs d71_compiz_funcs = {
+static const struct komeda_component_funcs d71_compiz_funcs = {
.update = d71_compiz_update,
.disable = d71_component_disable,
.dump_register = d71_compiz_dump,
@@ -467,7 +467,7 @@ static void d71_improc_dump(struct komeda_component *c, struct seq_file *sf)
seq_printf(sf, "IPS_RGB_YUV_COEFF%u:\t0x%X\n", i, v[i]);
}
-static struct komeda_component_funcs d71_improc_funcs = {
+static const struct komeda_component_funcs d71_improc_funcs = {
.update = d71_improc_update,
.disable = d71_component_disable,
.dump_register = d71_improc_dump,
@@ -580,7 +580,7 @@ static void d71_timing_ctrlr_dump(struct komeda_component *c,
seq_printf(sf, "BS_USER:\t\t0x%X\n", v[4]);
}
-static struct komeda_component_funcs d71_timing_ctrlr_funcs = {
+static const struct komeda_component_funcs d71_timing_ctrlr_funcs = {
.update = d71_timing_ctrlr_update,
.disable = d71_timing_ctrlr_disable,
.dump_register = d71_timing_ctrlr_dump,
diff --git a/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c b/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c
index 34506ef7ad40..3a7248d42376 100644
--- a/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c
+++ b/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c
@@ -502,7 +502,7 @@ static void d71_init_fmt_tbl(struct komeda_dev *mdev)
table->n_formats = ARRAY_SIZE(d71_format_caps_table);
}
-static struct komeda_dev_funcs d71_chip_funcs = {
+static const struct komeda_dev_funcs d71_chip_funcs = {
.init_format_table = d71_init_fmt_tbl,
.enum_resources = d71_enum_resources,
.cleanup = d71_cleanup,
@@ -514,7 +514,7 @@ static struct komeda_dev_funcs d71_chip_funcs = {
.flush = d71_flush,
};
-struct komeda_dev_funcs *
+const struct komeda_dev_funcs *
d71_identify(u32 __iomem *reg_base, struct komeda_chip_info *chip)
{
chip->arch_id = malidp_read32(reg_base, GLB_ARCH_ID);
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
index 62fad59f5a6a..284ce079d8c4 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
@@ -350,7 +350,7 @@ static bool komeda_crtc_mode_fixup(struct drm_crtc *crtc,
return true;
}
-static struct drm_crtc_helper_funcs komeda_crtc_helper_funcs = {
+static const struct drm_crtc_helper_funcs komeda_crtc_helper_funcs = {
.atomic_check = komeda_crtc_atomic_check,
.atomic_flush = komeda_crtc_atomic_flush,
.atomic_enable = komeda_crtc_atomic_enable,
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_dev.c b/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
index ca3599e4a4d3..b67030a9f056 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
@@ -8,6 +8,7 @@
#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>
@@ -249,6 +250,9 @@ struct komeda_dev *komeda_dev_create(struct device *dev)
goto err_cleanup;
}
+ dev->dma_parms = &mdev->dma_parms;
+ dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
+
err = sysfs_create_group(&dev->kobj, &komeda_sysfs_attr_group);
if (err) {
DRM_ERROR("create sysfs group failed.\n");
@@ -269,7 +273,7 @@ err_cleanup:
void komeda_dev_destroy(struct komeda_dev *mdev)
{
struct device *dev = mdev->dev;
- struct komeda_dev_funcs *funcs = mdev->funcs;
+ const struct komeda_dev_funcs *funcs = mdev->funcs;
int i;
sysfs_remove_group(&dev->kobj, &komeda_sysfs_attr_group);
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_dev.h b/drivers/gpu/drm/arm/display/komeda/komeda_dev.h
index 29e03c4e1ffc..973fd5e0eb98 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_dev.h
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_dev.h
@@ -60,7 +60,7 @@ struct komeda_chip_info {
struct komeda_product_data {
u32 product_id;
- struct komeda_dev_funcs *(*identify)(u32 __iomem *reg,
+ const struct komeda_dev_funcs *(*identify)(u32 __iomem *reg,
struct komeda_chip_info *info);
};
@@ -149,6 +149,8 @@ struct komeda_dev {
struct device *dev;
/** @reg_base: the base address of komeda io space */
u32 __iomem *reg_base;
+ /** @dma_parms: the dma parameters of komeda */
+ struct device_dma_parameters dma_parms;
/** @chip: the basic chip information */
struct komeda_chip_info chip;
@@ -173,7 +175,7 @@ struct komeda_dev {
struct komeda_pipeline *pipelines[KOMEDA_MAX_PIPELINES];
/** @funcs: chip funcs to access to HW */
- struct komeda_dev_funcs *funcs;
+ const struct komeda_dev_funcs *funcs;
/**
* @chip_data:
*
@@ -192,7 +194,7 @@ komeda_product_match(struct komeda_dev *mdev, u32 target)
return MALIDP_CORE_ID_PRODUCT_ID(mdev->chip.core_id) == target;
}
-struct komeda_dev_funcs *
+const struct komeda_dev_funcs *
d71_identify(u32 __iomem *reg, struct komeda_chip_info *chip);
struct komeda_dev *komeda_dev_create(struct device *dev);
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c
index c379439c6194..a130b62fa6d1 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c
@@ -12,7 +12,7 @@
/** komeda_pipeline_add - Add a pipeline to &komeda_dev */
struct komeda_pipeline *
komeda_pipeline_add(struct komeda_dev *mdev, size_t size,
- struct komeda_pipeline_funcs *funcs)
+ const struct komeda_pipeline_funcs *funcs)
{
struct komeda_pipeline *pipe;
@@ -130,7 +130,7 @@ komeda_pipeline_get_component(struct komeda_pipeline *pipe, int id)
struct komeda_component *
komeda_component_add(struct komeda_pipeline *pipe,
size_t comp_sz, u32 id, u32 hw_id,
- struct komeda_component_funcs *funcs,
+ const struct komeda_component_funcs *funcs,
u8 max_active_inputs, u32 supported_inputs,
u8 max_active_outputs, u32 __iomem *reg,
const char *name_fmt, ...)
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h
index b1f813a349a4..bae8a32b81a6 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h
@@ -124,7 +124,7 @@ struct komeda_component {
/**
* @funcs: chip functions to access HW
*/
- struct komeda_component_funcs *funcs;
+ const struct komeda_component_funcs *funcs;
};
/**
@@ -346,8 +346,8 @@ struct komeda_pipeline {
struct komeda_improc *improc;
/** @ctrlr: timing controller */
struct komeda_timing_ctrlr *ctrlr;
- /** @funcs: chip pipeline functions */
- struct komeda_pipeline_funcs *funcs; /* private pipeline functions */
+ /** @funcs: chip private pipeline functions */
+ const struct komeda_pipeline_funcs *funcs;
/** @of_node: pipeline dt node */
struct device_node *of_node;
@@ -397,7 +397,7 @@ struct komeda_pipeline_state {
/* pipeline APIs */
struct komeda_pipeline *
komeda_pipeline_add(struct komeda_dev *mdev, size_t size,
- struct komeda_pipeline_funcs *funcs);
+ const struct komeda_pipeline_funcs *funcs);
void komeda_pipeline_destroy(struct komeda_dev *mdev,
struct komeda_pipeline *pipe);
int komeda_assemble_pipelines(struct komeda_dev *mdev);
@@ -411,7 +411,7 @@ void komeda_pipeline_dump_register(struct komeda_pipeline *pipe,
struct komeda_component *
komeda_component_add(struct komeda_pipeline *pipe,
size_t comp_sz, u32 id, u32 hw_id,
- struct komeda_component_funcs *funcs,
+ const struct komeda_component_funcs *funcs,
u8 max_active_inputs, u32 supported_inputs,
u8 max_active_outputs, u32 __iomem *reg,
const char *name_fmt, ...);
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_plane.c b/drivers/gpu/drm/arm/display/komeda/komeda_plane.c
index 07ed0cc1bc44..c97062bdd69b 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_plane.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_plane.c
@@ -55,7 +55,6 @@ komeda_plane_atomic_check(struct drm_plane *plane,
struct komeda_plane_state *kplane_st = to_kplane_st(state);
struct komeda_layer *layer = kplane->layer;
struct drm_crtc_state *crtc_st;
- struct komeda_crtc *kcrtc;
struct komeda_crtc_state *kcrtc_st;
struct komeda_data_flow_cfg dflow;
int err;
@@ -64,7 +63,7 @@ komeda_plane_atomic_check(struct drm_plane *plane,
return 0;
crtc_st = drm_atomic_get_crtc_state(state->state, state->crtc);
- if (!crtc_st->enable) {
+ if (IS_ERR(crtc_st) || !crtc_st->enable) {
DRM_DEBUG_ATOMIC("Cannot update plane on a disabled CRTC.\n");
return -EINVAL;
}
@@ -73,7 +72,6 @@ komeda_plane_atomic_check(struct drm_plane *plane,
if (!crtc_st->active)
return 0;
- kcrtc = to_kcrtc(state->crtc);
kcrtc_st = to_kcrtc_st(crtc_st);
err = komeda_plane_init_data_flow(state, &dflow);
diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c
index 0b2b62f8fa3c..a3efa28436ea 100644
--- a/drivers/gpu/drm/arm/hdlcd_crtc.c
+++ b/drivers/gpu/drm/arm/hdlcd_crtc.c
@@ -186,20 +186,20 @@ static void hdlcd_crtc_atomic_disable(struct drm_crtc *crtc,
clk_disable_unprepare(hdlcd->clk);
}
-static int hdlcd_crtc_atomic_check(struct drm_crtc *crtc,
- struct drm_crtc_state *state)
+static enum drm_mode_status hdlcd_crtc_mode_valid(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode)
{
struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
- struct drm_display_mode *mode = &state->adjusted_mode;
long rate, clk_rate = mode->clock * 1000;
rate = clk_round_rate(hdlcd->clk, clk_rate);
- if (rate != clk_rate) {
+ /* 0.1% seems a close enough tolerance for the TDA19988 on Juno */
+ if (abs(rate - clk_rate) * 1000 > clk_rate) {
/* clock required by mode not supported by hardware */
- return -EINVAL;
+ return MODE_NOCLOCK;
}
- return 0;
+ return MODE_OK;
}
static void hdlcd_crtc_atomic_begin(struct drm_crtc *crtc,
@@ -220,7 +220,7 @@ static void hdlcd_crtc_atomic_begin(struct drm_crtc *crtc,
}
static const struct drm_crtc_helper_funcs hdlcd_crtc_helper_funcs = {
- .atomic_check = hdlcd_crtc_atomic_check,
+ .mode_valid = hdlcd_crtc_mode_valid,
.atomic_begin = hdlcd_crtc_atomic_begin,
.atomic_enable = hdlcd_crtc_atomic_enable,
.atomic_disable = hdlcd_crtc_atomic_disable,
diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c
index 3ecf8ddc5130..af1992f06a1d 100644
--- a/drivers/gpu/drm/arm/malidp_drv.c
+++ b/drivers/gpu/drm/arm/malidp_drv.c
@@ -188,6 +188,7 @@ static void malidp_atomic_commit_hw_done(struct drm_atomic_state *state)
{
struct drm_device *drm = state->dev;
struct malidp_drm *malidp = drm->dev_private;
+ int loop = 5;
malidp->event = malidp->crtc.state->event;
malidp->crtc.state->event = NULL;
@@ -202,8 +203,18 @@ static void malidp_atomic_commit_hw_done(struct drm_atomic_state *state)
drm_crtc_vblank_get(&malidp->crtc);
/* only set config_valid if the CRTC is enabled */
- if (malidp_set_and_wait_config_valid(drm) < 0)
+ if (malidp_set_and_wait_config_valid(drm) < 0) {
+ /*
+ * make a loop around the second CVAL setting and
+ * try 5 times before giving up.
+ */
+ while (loop--) {
+ if (!malidp_set_and_wait_config_valid(drm))
+ break;
+ }
DRM_DEBUG_DRIVER("timed out waiting for updated configuration\n");
+ }
+
} else if (malidp->event) {
/* CRTC inactive means vblank IRQ is disabled, send event directly */
spin_lock_irq(&drm->event_lock);
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 2e0cb4246cbd..22a5c617f670 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -1607,15 +1607,6 @@ int drm_atomic_helper_async_check(struct drm_device *dev,
old_plane_state->crtc != new_plane_state->crtc)
return -EINVAL;
- /*
- * FIXME: Since prepare_fb and cleanup_fb are always called on
- * the new_plane_state for async updates we need to block framebuffer
- * changes. This prevents use of a fb that's been cleaned up and
- * double cleanups from occuring.
- */
- if (old_plane_state->fb != new_plane_state->fb)
- return -EINVAL;
-
funcs = plane->helper_private;
if (!funcs->atomic_async_update)
return -EINVAL;
@@ -1646,6 +1637,8 @@ EXPORT_SYMBOL(drm_atomic_helper_async_check);
* drm_atomic_async_check() succeeds. Async commits are not supposed to swap
* the states like normal sync commits, but just do in-place changes on the
* current state.
+ *
+ * TODO: Implement full swap instead of doing in-place changes.
*/
void drm_atomic_helper_async_commit(struct drm_device *dev,
struct drm_atomic_state *state)
@@ -1656,6 +1649,9 @@ void drm_atomic_helper_async_commit(struct drm_device *dev,
int i;
for_each_new_plane_in_state(state, plane, plane_state, i) {
+ struct drm_framebuffer *new_fb = plane_state->fb;
+ struct drm_framebuffer *old_fb = plane->state->fb;
+
funcs = plane->helper_private;
funcs->atomic_async_update(plane, plane_state);
@@ -1664,11 +1660,17 @@ void drm_atomic_helper_async_commit(struct drm_device *dev,
* plane->state in-place, make sure at least common
* properties have been properly updated.
*/
- WARN_ON_ONCE(plane->state->fb != plane_state->fb);
+ WARN_ON_ONCE(plane->state->fb != new_fb);
WARN_ON_ONCE(plane->state->crtc_x != plane_state->crtc_x);
WARN_ON_ONCE(plane->state->crtc_y != plane_state->crtc_y);
WARN_ON_ONCE(plane->state->src_x != plane_state->src_x);
WARN_ON_ONCE(plane->state->src_y != plane_state->src_y);
+
+ /*
+ * Make sure the FBs have been swapped so that cleanups in the
+ * new_state performs a cleanup in the old FB.
+ */
+ WARN_ON_ONCE(plane_state->fb != old_fb);
}
}
EXPORT_SYMBOL(drm_atomic_helper_async_commit);
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 5cb59c0b4bbe..de5347725564 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -2530,7 +2530,7 @@ static const struct cmd_info cmd_info[] = {
0, 12, NULL},
{"VEB_DI_IECP", OP_VEB_DNDI_IECP_STATE, F_LEN_VAR, R_VECS, D_BDW_PLUS,
- 0, 20, NULL},
+ 0, 12, NULL},
};
static void add_cmd_entry(struct intel_gvt *gvt, struct cmd_entry *e)
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 244ad1729764..53115bdae12b 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -53,13 +53,19 @@ static int preallocated_oos_pages = 8192;
*/
bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size)
{
- if ((!vgpu_gmadr_is_valid(vgpu, addr)) || (size
- && !vgpu_gmadr_is_valid(vgpu, addr + size - 1))) {
- gvt_vgpu_err("invalid range gmadr 0x%llx size 0x%x\n",
- addr, size);
- return false;
- }
- return true;
+ if (size == 0)
+ return vgpu_gmadr_is_valid(vgpu, addr);
+
+ if (vgpu_gmadr_is_aperture(vgpu, addr) &&
+ vgpu_gmadr_is_aperture(vgpu, addr + size - 1))
+ return true;
+ else if (vgpu_gmadr_is_hidden(vgpu, addr) &&
+ vgpu_gmadr_is_hidden(vgpu, addr + size - 1))
+ return true;
+
+ gvt_dbg_mm("Invalid ggtt range at 0x%llx, size: 0x%x\n",
+ addr, size);
+ return false;
}
/* translate a guest gmadr to host gmadr */
@@ -942,7 +948,16 @@ static int ppgtt_invalidate_spt_by_shadow_entry(struct intel_vgpu *vgpu,
if (e->type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY
&& e->type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
- cur_pt_type = get_next_pt_type(e->type) + 1;
+ cur_pt_type = get_next_pt_type(e->type);
+
+ if (!gtt_type_is_pt(cur_pt_type) ||
+ !gtt_type_is_pt(cur_pt_type + 1)) {
+ WARN(1, "Invalid page table type, cur_pt_type is: %d\n", cur_pt_type);
+ return -EINVAL;
+ }
+
+ cur_pt_type += 1;
+
if (ops->get_pfn(e) ==
vgpu->gtt.scratch_pt[cur_pt_type].page_mfn)
return 0;
@@ -1102,6 +1117,7 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_populate_spt_by_guest_entry(
err_free_spt:
ppgtt_free_spt(spt);
+ spt = NULL;
err:
gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
spt, we->val64, we->type);
@@ -2183,7 +2199,8 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
unsigned long g_gtt_index = off >> info->gtt_entry_size_shift;
unsigned long gma, gfn;
- struct intel_gvt_gtt_entry e, m;
+ struct intel_gvt_gtt_entry e = {.val64 = 0, .type = GTT_TYPE_GGTT_PTE};
+ struct intel_gvt_gtt_entry m = {.val64 = 0, .type = GTT_TYPE_GGTT_PTE};
dma_addr_t dma_addr;
int ret;
struct intel_gvt_partial_pte *partial_pte, *pos, *n;
@@ -2250,7 +2267,8 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
if (!partial_update && (ops->test_present(&e))) {
gfn = ops->get_pfn(&e);
- m = e;
+ m.val64 = e.val64;
+ m.type = e.type;
/* one PTE update may be issued in multiple writes and the
* first write may not construct a valid gfn
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index e09bd6e0cc4d..a6ade66349bd 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -464,6 +464,8 @@ static i915_reg_t force_nonpriv_white_list[] = {
_MMIO(0x2690),
_MMIO(0x2694),
_MMIO(0x2698),
+ _MMIO(0x2754),
+ _MMIO(0x28a0),
_MMIO(0x4de0),
_MMIO(0x4de4),
_MMIO(0x4dfc),
@@ -1690,8 +1692,22 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
bool enable_execlist;
int ret;
+ (*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(1);
+ if (IS_COFFEELAKE(vgpu->gvt->dev_priv))
+ (*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(2);
write_vreg(vgpu, offset, p_data, bytes);
+ if (data & _MASKED_BIT_ENABLE(1)) {
+ enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
+ return 0;
+ }
+
+ if (IS_COFFEELAKE(vgpu->gvt->dev_priv) &&
+ data & _MASKED_BIT_ENABLE(2)) {
+ enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
+ return 0;
+ }
+
/* when PPGTT mode enabled, we will check if guest has called
* pvinfo, if not, we will treat this guest as non-gvtg-aware
* guest, and stop emulating its cfg space, mmio, gtt, etc.
@@ -1773,6 +1789,21 @@ static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
return 0;
}
+static int csfe_chicken1_mmio_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data,
+ unsigned int bytes)
+{
+ u32 data = *(u32 *)p_data;
+
+ (*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(0x18);
+ write_vreg(vgpu, offset, p_data, bytes);
+
+ if (data & _MASKED_BIT_ENABLE(0x10) || data & _MASKED_BIT_ENABLE(0x8))
+ enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
+
+ return 0;
+}
+
#define MMIO_F(reg, s, f, am, rm, d, r, w) do { \
ret = new_mmio_info(gvt, i915_mmio_reg_offset(reg), \
f, s, am, rm, d, r, w); \
@@ -1893,7 +1924,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DFH(_MMIO(0x20dc), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(_3D_CHICKEN3, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(_MMIO(0x2088), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(_MMIO(0x20e4), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(FF_SLICE_CS_CHICKEN2, D_ALL,
+ F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(_MMIO(0x2470), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(GAM_ECOCHK, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
@@ -2997,7 +3029,7 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_D(CSR_HTP_SKL, D_SKL_PLUS);
MMIO_D(CSR_LAST_WRITE, D_SKL_PLUS);
- MMIO_D(BDW_SCRATCH1, D_SKL_PLUS);
+ MMIO_DFH(BDW_SCRATCH1, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_D(SKL_DFSM, D_SKL_PLUS);
MMIO_D(DISPIO_CR_TX_BMU_CR0, D_SKL_PLUS);
@@ -3010,8 +3042,8 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_D(RPM_CONFIG0, D_SKL_PLUS);
MMIO_D(_MMIO(0xd08), D_SKL_PLUS);
MMIO_D(RC6_LOCATION, D_SKL_PLUS);
- MMIO_DFH(GEN7_FF_SLICE_CS_CHICKEN1, D_SKL_PLUS, F_MODE_MASK,
- NULL, NULL);
+ MMIO_DFH(GEN7_FF_SLICE_CS_CHICKEN1, D_SKL_PLUS,
+ F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(GEN9_CS_DEBUG_MODE1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
NULL, NULL);
@@ -3030,7 +3062,7 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_D(_MMIO(0x46520), D_SKL_PLUS);
MMIO_D(_MMIO(0xc403c), D_SKL_PLUS);
- MMIO_D(_MMIO(0xb004), D_SKL_PLUS);
+ MMIO_DFH(GEN8_GARBCNTL, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DH(DMA_CTRL, D_SKL_PLUS, NULL, dma_ctrl_write);
MMIO_D(_MMIO(0x65900), D_SKL_PLUS);
@@ -3059,7 +3091,10 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_C)), D_SKL_PLUS);
MMIO_D(_MMIO(0x44500), D_SKL_PLUS);
- MMIO_DFH(GEN9_CSFE_CHICKEN1_RCS, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+#define CSFE_CHICKEN1_REG(base) _MMIO((base) + 0xD4)
+ MMIO_RING_DFH(CSFE_CHICKEN1_REG, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
+ NULL, csfe_chicken1_mmio_write);
+#undef CSFE_CHICKEN1_REG
MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
NULL, NULL);
MMIO_DFH(GEN9_WM_CHICKEN3, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
@@ -3239,7 +3274,7 @@ static int init_bxt_mmio_info(struct intel_gvt *gvt)
MMIO_D(GEN8_PUSHBUS_ENABLE, D_BXT);
MMIO_D(GEN8_PUSHBUS_SHIFT, D_BXT);
MMIO_D(GEN6_GFXPAUSE, D_BXT);
- MMIO_D(GEN8_L3SQCREG1, D_BXT);
+ MMIO_DFH(GEN8_L3SQCREG1, D_BXT, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(GEN9_CTX_PREEMPT_REG, D_BXT, F_CMD_ACCESS, NULL, NULL);
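A note on the _MASKED_BIT_ENABLE() convention the new handlers above rely on: i915 "masked" registers carry a write-enable mask in their upper 16 bits, so a write only changes the low bits whose mask bit is set, and the macro expands to roughly ((a) << 16 | (a)). That is why csfe_chicken1_mmio_write() can both strip bits 3..4 from the virtual register with &= ~_MASKED_BIT_ENABLE(0x18) and still detect, from the raw value, that the guest tried to set them. A minimal standalone sketch of the mechanism (the register model is illustrative, not GVT code):

#include <stdint.h>
#include <stdio.h>

/* Upper 16 bits say which of the lower 16 a write may touch. */
#define MASKED_BIT_ENABLE(a)	(((a) << 16) | (a))
#define MASKED_BIT_DISABLE(a)	((a) << 16)

/* Illustrative model of how hardware applies a masked write. */
static uint32_t masked_write(uint32_t old, uint32_t val)
{
	uint32_t mask = val >> 16;

	return (old & ~mask) | (val & mask);
}

int main(void)
{
	uint32_t reg = 0x0005;			/* bits 0 and 2 set */

	reg = masked_write(reg, MASKED_BIT_ENABLE(0x8));  /* set bit 3 only */
	reg = masked_write(reg, MASKED_BIT_DISABLE(0x1)); /* clear bit 0 only */
	printf("reg = 0x%04x\n", reg);		/* prints 0x000c */
	return 0;
}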
diff --git a/drivers/gpu/drm/i915/gvt/reg.h b/drivers/gpu/drm/i915/gvt/reg.h
index 33aaa14bfdde..5b66e14c5b7b 100644
--- a/drivers/gpu/drm/i915/gvt/reg.h
+++ b/drivers/gpu/drm/i915/gvt/reg.h
@@ -102,6 +102,8 @@
#define FORCEWAKE_ACK_MEDIA_GEN9_REG 0x0D88
#define FORCEWAKE_ACK_HSW_REG 0x130044
+#define RB_HEAD_WRAP_CNT_MAX ((1 << 11) - 1)
+#define RB_HEAD_WRAP_CNT_OFF 21
#define RB_HEAD_OFF_MASK ((1U << 21) - (1U << 2))
#define RB_TAIL_OFF_MASK ((1U << 21) - (1U << 3))
#define RB_TAIL_SIZE_MASK ((1U << 21) - (1U << 12))
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 13632dba8b2a..0f919f0a43d4 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -812,10 +812,31 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
void *src;
unsigned long context_gpa, context_page_num;
int i;
+ struct drm_i915_private *dev_priv = gvt->dev_priv;
+ u32 ring_base;
+ u32 head, tail;
+ u16 wrap_count;
gvt_dbg_sched("ring id %d workload lrca %x\n", rq->engine->id,
workload->ctx_desc.lrca);
+ head = workload->rb_head;
+ tail = workload->rb_tail;
+ wrap_count = workload->guest_rb_head >> RB_HEAD_WRAP_CNT_OFF;
+
+ if (tail < head) {
+ if (wrap_count == RB_HEAD_WRAP_CNT_MAX)
+ wrap_count = 0;
+ else
+ wrap_count += 1;
+ }
+
+ head = (wrap_count << RB_HEAD_WRAP_CNT_OFF) | tail;
+
+ ring_base = dev_priv->engine[workload->ring_id]->mmio_base;
+ vgpu_vreg_t(vgpu, RING_TAIL(ring_base)) = tail;
+ vgpu_vreg_t(vgpu, RING_HEAD(ring_base)) = head;
+
context_page_num = rq->engine->context_size;
context_page_num = context_page_num >> PAGE_SHIFT;
@@ -1415,6 +1436,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
u64 ring_context_gpa;
u32 head, tail, start, ctl, ctx_ctl, per_ctx, indirect_ctx;
+ u32 guest_head;
int ret;
ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
@@ -1430,6 +1452,8 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
RING_CTX_OFF(ring_tail.val), &tail, 4);
+ guest_head = head;
+
head &= RB_HEAD_OFF_MASK;
tail &= RB_TAIL_OFF_MASK;
@@ -1462,6 +1486,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
workload->ctx_desc = *desc;
workload->ring_context_gpa = ring_context_gpa;
workload->rb_head = head;
+ workload->guest_rb_head = guest_head;
workload->rb_tail = tail;
workload->rb_start = start;
workload->rb_ctl = ctl;
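The update_guest_context() hunk above writes back a head value that encodes a wrap counter: bits 2..20 hold the head offset (RB_HEAD_OFF_MASK) and bits 21..31 hold the wrap count, which must be advanced whenever the workload consumed past the end of the ring (tail < head). A standalone sketch of that arithmetic, assuming the same bit layout as the reg.h additions above:

#include <stdint.h>
#include <stdio.h>

#define RB_HEAD_WRAP_CNT_MAX	((1 << 11) - 1)
#define RB_HEAD_WRAP_CNT_OFF	21

/*
 * Head value reported back to the guest after a workload, mirroring
 * the logic added to update_guest_context() above.
 */
static uint32_t guest_head(uint32_t guest_rb_head, uint32_t head, uint32_t tail)
{
	uint32_t wrap = guest_rb_head >> RB_HEAD_WRAP_CNT_OFF;

	if (tail < head)	/* execution wrapped past the end of the ring */
		wrap = (wrap == RB_HEAD_WRAP_CNT_MAX) ? 0 : wrap + 1;

	return (wrap << RB_HEAD_WRAP_CNT_OFF) | tail;
}

int main(void)
{
	/* wrap count was 2; workload started at 0x100 and ended at 0x40 */
	printf("0x%08x\n", guest_head(2u << RB_HEAD_WRAP_CNT_OFF, 0x100, 0x40));
	return 0;
}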
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index 90c6756f5453..c50d14a9ce85 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -100,6 +100,7 @@ struct intel_vgpu_workload {
struct execlist_ctx_descriptor_format ctx_desc;
struct execlist_ring_context *ring_context;
unsigned long rb_head, rb_tail, rb_ctl, rb_start, rb_len;
+ unsigned long guest_rb_head;
bool restore_inhibit;
struct intel_vgpu_elsp_dwords elsp_dwords;
bool emulate_schedule_in;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 249d35c12a75..2aa69d347ec4 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -7620,6 +7620,9 @@ enum {
#define GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION (1 << 8)
#define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE (1 << 0)
+#define GEN8_L3CNTLREG _MMIO(0x7034)
+ #define GEN8_ERRDETBCTRL (1 << 9)
+
#define GEN11_COMMON_SLICE_CHICKEN3 _MMIO(0x7304)
#define GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC (1 << 11)
diff --git a/drivers/gpu/drm/i915/intel_workarounds.c b/drivers/gpu/drm/i915/intel_workarounds.c
index 6decd432f4d3..841b8e515f4d 100644
--- a/drivers/gpu/drm/i915/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/intel_workarounds.c
@@ -518,6 +518,12 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine)
struct drm_i915_private *i915 = engine->i915;
struct i915_wa_list *wal = &engine->ctx_wa_list;
+ /* WaDisableBankHangMode:icl */
+ wa_write(wal,
+ GEN8_L3CNTLREG,
+ intel_uncore_read(engine->uncore, GEN8_L3CNTLREG) |
+ GEN8_ERRDETBCTRL);
+
/* Wa_1604370585:icl (pre-prod)
* Formerly known as WaPushConstantDereferenceHoldDisable
*/
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
index be13140967b4..b854f471e9e5 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
@@ -502,6 +502,8 @@ static int mdp5_plane_atomic_async_check(struct drm_plane *plane,
static void mdp5_plane_atomic_async_update(struct drm_plane *plane,
struct drm_plane_state *new_state)
{
+ struct drm_framebuffer *old_fb = plane->state->fb;
+
plane->state->src_x = new_state->src_x;
plane->state->src_y = new_state->src_y;
plane->state->crtc_x = new_state->crtc_x;
@@ -524,6 +526,8 @@ static void mdp5_plane_atomic_async_update(struct drm_plane *plane,
*to_mdp5_plane_state(plane->state) =
*to_mdp5_plane_state(new_state);
+
+ new_state->fb = old_fb;
}
static const struct drm_plane_helper_funcs mdp5_plane_helper_funcs = {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/firmware.h b/drivers/gpu/drm/nouveau/include/nvkm/core/firmware.h
index ff0fa38aee72..54da9c6bc8d5 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/firmware.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/firmware.h
@@ -1,12 +1,12 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_FIRMWARE_H__
#define __NVKM_FIRMWARE_H__
-
-#include <core/device.h>
-
-int nvkm_firmware_get(struct nvkm_device *device, const char *fwname,
- const struct firmware **fw);
-
-void nvkm_firmware_put(const struct firmware *fw);
-
+#include <core/subdev.h>
+
+int nvkm_firmware_get_version(const struct nvkm_subdev *, const char *fwname,
+ int min_version, int max_version,
+ const struct firmware **);
+int nvkm_firmware_get(const struct nvkm_subdev *, const char *fwname,
+ const struct firmware **);
+void nvkm_firmware_put(const struct firmware *);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/firmware.c b/drivers/gpu/drm/nouveau/nvkm/core/firmware.c
index 058ff46b5f16..092acdec2c39 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/firmware.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/firmware.c
@@ -24,7 +24,7 @@
/**
* nvkm_firmware_get - load firmware from the official nvidia/chip/ directory
- * @device device that will use that firmware
+ * @subdev subdevice that will use that firmware
* @fwname name of firmware file to load
* @fw firmware structure to load to
*
@@ -32,9 +32,11 @@
* Firmware files released by NVIDIA will always follow this format.
*/
int
-nvkm_firmware_get(struct nvkm_device *device, const char *fwname,
- const struct firmware **fw)
+nvkm_firmware_get_version(const struct nvkm_subdev *subdev, const char *fwname,
+ int min_version, int max_version,
+ const struct firmware **fw)
{
+ struct nvkm_device *device = subdev->device;
char f[64];
char cname[16];
int i;
@@ -48,8 +50,29 @@ nvkm_firmware_get(struct nvkm_device *device, const char *fwname,
cname[i] = tolower(cname[i]);
}
- snprintf(f, sizeof(f), "nvidia/%s/%s.bin", cname, fwname);
- return request_firmware(fw, f, device->dev);
+ for (i = max_version; i >= min_version; i--) {
+ if (i != 0)
+ snprintf(f, sizeof(f), "nvidia/%s/%s-%d.bin", cname, fwname, i);
+ else
+ snprintf(f, sizeof(f), "nvidia/%s/%s.bin", cname, fwname);
+
+ if (!firmware_request_nowarn(fw, f, device->dev)) {
+ nvkm_debug(subdev, "firmware \"%s\" loaded\n", f);
+ return i;
+ }
+
+ nvkm_debug(subdev, "firmware \"%s\" unavailable\n", f);
+ }
+
+ nvkm_error(subdev, "failed to load firmware \"%s\"", fwname);
+ return -ENOENT;
+}
+
+int
+nvkm_firmware_get(const struct nvkm_subdev *subdev, const char *fwname,
+ const struct firmware **fw)
+{
+ return nvkm_firmware_get_version(subdev, fwname, 0, 0, fw);
}
/**
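To summarize the new lookup contract: nvkm_firmware_get_version() probes nvidia/<chip>/<name>-<N>.bin from max_version down to min_version, treats the unsuffixed <name>.bin as version 0, and returns the version it found so that callers can pin companion files to it. A userspace sketch of the same search order, with stat() standing in for firmware_request_nowarn() and illustrative paths:

#include <stdio.h>
#include <sys/stat.h>

/*
 * Probe "dir/name-<N>.bin" from maxver down to minver, with the plain
 * "dir/name.bin" standing in for version 0.  Returns the version found,
 * or -1 if none exists: the same contract as the kernel helper.
 */
static int firmware_lookup(const char *dir, const char *name,
			   int minver, int maxver)
{
	char path[256];
	struct stat st;
	int v;

	for (v = maxver; v >= minver; v--) {
		if (v != 0)
			snprintf(path, sizeof(path), "%s/%s-%d.bin",
				 dir, name, v);
		else
			snprintf(path, sizeof(path), "%s/%s.bin", dir, name);

		if (stat(path, &st) == 0)
			return v;
	}
	return -1;
}

int main(void)
{
	printf("found version %d\n",
	       firmware_lookup("nvidia/gp102", "sec2/image", 0, 1));
	return 0;
}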
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index 81a13cf9a292..c578deb5867a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -2115,12 +2115,10 @@ int
gf100_gr_ctor_fw(struct gf100_gr *gr, const char *fwname,
struct gf100_gr_fuc *fuc)
{
- struct nvkm_subdev *subdev = &gr->base.engine.subdev;
- struct nvkm_device *device = subdev->device;
const struct firmware *fw;
int ret;
- ret = nvkm_firmware_get(device, fwname, &fw);
+ ret = nvkm_firmware_get(&gr->base.engine.subdev, fwname, &fw);
if (ret) {
ret = gf100_gr_ctor_fw_legacy(gr, fwname, fuc, ret);
if (ret)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.c
index 75dc06557877..dc80985cf093 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.c
@@ -36,7 +36,7 @@ nvkm_acr_load_firmware(const struct nvkm_subdev *subdev, const char *name,
void *blob;
int ret;
- ret = nvkm_firmware_get(subdev->device, name, &fw);
+ ret = nvkm_firmware_get(subdev, name, &fw);
if (ret)
return ERR_PTR(ret);
if (fw->size < min_size) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c
index 1df09ed6fe6d..4fd4cfe459b8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c
@@ -229,6 +229,8 @@ struct acr_r352_lsf_wpr_header {
struct ls_ucode_img_r352 {
struct ls_ucode_img base;
+ const struct acr_r352_lsf_func *func;
+
struct acr_r352_lsf_wpr_header wpr_header;
struct acr_r352_lsf_lsb_header lsb_header;
};
@@ -243,6 +245,7 @@ acr_r352_ls_ucode_img_load(const struct acr_r352 *acr,
enum nvkm_secboot_falcon falcon_id)
{
const struct nvkm_subdev *subdev = acr->base.subdev;
+ const struct acr_r352_ls_func *func = acr->func->ls_func[falcon_id];
struct ls_ucode_img_r352 *img;
int ret;
@@ -252,15 +255,16 @@ acr_r352_ls_ucode_img_load(const struct acr_r352 *acr,
img->base.falcon_id = falcon_id;
- ret = acr->func->ls_func[falcon_id]->load(sb, &img->base);
-
- if (ret) {
+ ret = func->load(sb, func->version_max, &img->base);
+ if (ret < 0) {
kfree(img->base.ucode_data);
kfree(img->base.sig);
kfree(img);
return ERR_PTR(ret);
}
+ img->func = func->version[ret];
+
/* Check that the signature size matches our expectations... */
if (img->base.sig_size != sizeof(img->lsb_header.signature)) {
nvkm_error(subdev, "invalid signature size for %s falcon!\n",
@@ -302,8 +306,7 @@ acr_r352_ls_img_fill_headers(struct acr_r352 *acr,
struct acr_r352_lsf_wpr_header *whdr = &img->wpr_header;
struct acr_r352_lsf_lsb_header *lhdr = &img->lsb_header;
struct ls_ucode_img_desc *desc = &_img->ucode_desc;
- const struct acr_r352_ls_func *func =
- acr->func->ls_func[_img->falcon_id];
+ const struct acr_r352_lsf_func *func = img->func;
/* Fill WPR header */
whdr->falcon_id = _img->falcon_id;
@@ -419,8 +422,8 @@ acr_r352_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs,
/* Figure out how large we need gdesc to be. */
list_for_each_entry(_img, imgs, node) {
- const struct acr_r352_ls_func *ls_func =
- acr->func->ls_func[_img->falcon_id];
+ struct ls_ucode_img_r352 *img = ls_ucode_img_r352(_img);
+ const struct acr_r352_lsf_func *ls_func = img->func;
max_desc_size = max(max_desc_size, ls_func->bl_desc_size);
}
@@ -433,8 +436,7 @@ acr_r352_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs,
list_for_each_entry(_img, imgs, node) {
struct ls_ucode_img_r352 *img = ls_ucode_img_r352(_img);
- const struct acr_r352_ls_func *ls_func =
- acr->func->ls_func[_img->falcon_id];
+ const struct acr_r352_lsf_func *ls_func = img->func;
nvkm_gpuobj_memcpy_to(wpr_blob, pos, &img->wpr_header,
sizeof(img->wpr_header));
@@ -1063,20 +1065,36 @@ acr_r352_dtor(struct nvkm_acr *_acr)
kfree(acr);
}
+static const struct acr_r352_lsf_func
+acr_r352_ls_fecs_func_0 = {
+ .generate_bl_desc = acr_r352_generate_flcn_bl_desc,
+ .bl_desc_size = sizeof(struct acr_r352_flcn_bl_desc),
+};
+
const struct acr_r352_ls_func
acr_r352_ls_fecs_func = {
.load = acr_ls_ucode_load_fecs,
+ .version_max = 0,
+ .version = {
+ &acr_r352_ls_fecs_func_0,
+ }
+};
+
+static const struct acr_r352_lsf_func
+acr_r352_ls_gpccs_func_0 = {
.generate_bl_desc = acr_r352_generate_flcn_bl_desc,
.bl_desc_size = sizeof(struct acr_r352_flcn_bl_desc),
+ /* GPCCS will be loaded using PRI */
+ .lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD,
};
const struct acr_r352_ls_func
acr_r352_ls_gpccs_func = {
.load = acr_ls_ucode_load_gpccs,
- .generate_bl_desc = acr_r352_generate_flcn_bl_desc,
- .bl_desc_size = sizeof(struct acr_r352_flcn_bl_desc),
- /* GPCCS will be loaded using PRI */
- .lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD,
+ .version_max = 0,
+ .version = {
+ &acr_r352_ls_gpccs_func_0,
+ }
};
@@ -1150,12 +1168,20 @@ acr_r352_generate_pmu_bl_desc(const struct nvkm_acr *acr,
desc->argv = addr_args;
}
+static const struct acr_r352_lsf_func
+acr_r352_ls_pmu_func_0 = {
+ .generate_bl_desc = acr_r352_generate_pmu_bl_desc,
+ .bl_desc_size = sizeof(struct acr_r352_pmu_bl_desc),
+};
+
static const struct acr_r352_ls_func
acr_r352_ls_pmu_func = {
.load = acr_ls_ucode_load_pmu,
- .generate_bl_desc = acr_r352_generate_pmu_bl_desc,
- .bl_desc_size = sizeof(struct acr_r352_pmu_bl_desc),
.post_run = acr_ls_pmu_post_run,
+ .version_max = 0,
+ .version = {
+ &acr_r352_ls_pmu_func_0,
+ }
};
const struct acr_r352_func
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.h
index 3d58ab871563..e516cab849dd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.h
@@ -47,24 +47,34 @@ hsf_load_header_app_size(const struct hsf_load_header *hdr, u32 app)
}
/**
- * struct acr_r352_ls_func - manages a single LS firmware
+ * struct acr_r352_lsf_func - manages a specific LS firmware version
*
- * @load: load the external firmware into a ls_ucode_img
* @generate_bl_desc: function called on a block of bl_desc_size to generate the
* proper bootloader descriptor for this LS firmware
* @bl_desc_size: size of the bootloader descriptor
- * @post_run: hook called right after the ACR is executed
* @lhdr_flags: LS flags
*/
-struct acr_r352_ls_func {
- int (*load)(const struct nvkm_secboot *, struct ls_ucode_img *);
+struct acr_r352_lsf_func {
void (*generate_bl_desc)(const struct nvkm_acr *,
const struct ls_ucode_img *, u64, void *);
u32 bl_desc_size;
- int (*post_run)(const struct nvkm_acr *, const struct nvkm_secboot *);
u32 lhdr_flags;
};
+/**
+ * struct acr_r352_ls_func - manages a single LS falcon
+ *
+ * @load: load the external firmware into a ls_ucode_img
+ * @post_run: hook called right after the ACR is executed
+ */
+struct acr_r352_ls_func {
+ int (*load)(const struct nvkm_secboot *, int maxver,
+ struct ls_ucode_img *);
+ int (*post_run)(const struct nvkm_acr *, const struct nvkm_secboot *);
+ int version_max;
+ const struct acr_r352_lsf_func *version[];
+};
+
struct acr_r352;
/**
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.c
index 14b36ef93628..f6b2d20d7fc3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.c
@@ -66,20 +66,36 @@ acr_r361_generate_hs_bl_desc(const struct hsf_load_header *hdr, void *_bl_desc,
bl_desc->data_size = hdr->data_size;
}
+static const struct acr_r352_lsf_func
+acr_r361_ls_fecs_func_0 = {
+ .generate_bl_desc = acr_r361_generate_flcn_bl_desc,
+ .bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc),
+};
+
const struct acr_r352_ls_func
acr_r361_ls_fecs_func = {
.load = acr_ls_ucode_load_fecs,
+ .version_max = 0,
+ .version = {
+ &acr_r361_ls_fecs_func_0,
+ }
+};
+
+static const struct acr_r352_lsf_func
+acr_r361_ls_gpccs_func_0 = {
.generate_bl_desc = acr_r361_generate_flcn_bl_desc,
.bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc),
+ /* GPCCS will be loaded using PRI */
+ .lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD,
};
const struct acr_r352_ls_func
acr_r361_ls_gpccs_func = {
.load = acr_ls_ucode_load_gpccs,
- .generate_bl_desc = acr_r361_generate_flcn_bl_desc,
- .bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc),
- /* GPCCS will be loaded using PRI */
- .lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD,
+ .version_max = 0,
+ .version = {
+ &acr_r361_ls_gpccs_func_0,
+ }
};
struct acr_r361_pmu_bl_desc {
@@ -125,12 +141,20 @@ acr_r361_generate_pmu_bl_desc(const struct nvkm_acr *acr,
desc->argv = addr_args;
}
+static const struct acr_r352_lsf_func
+acr_r361_ls_pmu_func_0 = {
+ .generate_bl_desc = acr_r361_generate_pmu_bl_desc,
+ .bl_desc_size = sizeof(struct acr_r361_pmu_bl_desc),
+};
+
const struct acr_r352_ls_func
acr_r361_ls_pmu_func = {
.load = acr_ls_ucode_load_pmu,
- .generate_bl_desc = acr_r361_generate_pmu_bl_desc,
- .bl_desc_size = sizeof(struct acr_r361_pmu_bl_desc),
.post_run = acr_ls_pmu_post_run,
+ .version_max = 0,
+ .version = {
+ &acr_r361_ls_pmu_func_0,
+ }
};
static void
@@ -164,12 +188,20 @@ acr_r361_generate_sec2_bl_desc(const struct nvkm_acr *acr,
desc->argv = 0x01000000;
}
-const struct acr_r352_ls_func
-acr_r361_ls_sec2_func = {
- .load = acr_ls_ucode_load_sec2,
+const struct acr_r352_lsf_func
+acr_r361_ls_sec2_func_0 = {
.generate_bl_desc = acr_r361_generate_sec2_bl_desc,
.bl_desc_size = sizeof(struct acr_r361_pmu_bl_desc),
+};
+
+static const struct acr_r352_ls_func
+acr_r361_ls_sec2_func = {
+ .load = acr_ls_ucode_load_sec2,
.post_run = acr_ls_sec2_post_run,
+ .version_max = 0,
+ .version = {
+ &acr_r361_ls_sec2_func_0,
+ }
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.h
index f9f978daadb9..38dec93779c8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.h
@@ -67,6 +67,5 @@ void acr_r361_generate_hs_bl_desc(const struct hsf_load_header *, void *, u64);
extern const struct acr_r352_ls_func acr_r361_ls_fecs_func;
extern const struct acr_r352_ls_func acr_r361_ls_gpccs_func;
extern const struct acr_r352_ls_func acr_r361_ls_pmu_func;
-extern const struct acr_r352_ls_func acr_r361_ls_sec2_func;
-
+extern const struct acr_r352_lsf_func acr_r361_ls_sec2_func_0;
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c
index 978ad0790367..472ced29da7e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c
@@ -22,6 +22,7 @@
#include "acr_r367.h"
#include "acr_r361.h"
+#include "acr_r370.h"
#include <core/gpuobj.h>
@@ -100,6 +101,8 @@ struct acr_r367_lsf_wpr_header {
struct ls_ucode_img_r367 {
struct ls_ucode_img base;
+ const struct acr_r352_lsf_func *func;
+
struct acr_r367_lsf_wpr_header wpr_header;
struct acr_r367_lsf_lsb_header lsb_header;
};
@@ -111,6 +114,7 @@ acr_r367_ls_ucode_img_load(const struct acr_r352 *acr,
enum nvkm_secboot_falcon falcon_id)
{
const struct nvkm_subdev *subdev = acr->base.subdev;
+ const struct acr_r352_ls_func *func = acr->func->ls_func[falcon_id];
struct ls_ucode_img_r367 *img;
int ret;
@@ -120,14 +124,16 @@ acr_r367_ls_ucode_img_load(const struct acr_r352 *acr,
img->base.falcon_id = falcon_id;
- ret = acr->func->ls_func[falcon_id]->load(sb, &img->base);
- if (ret) {
+ ret = func->load(sb, func->version_max, &img->base);
+ if (ret < 0) {
kfree(img->base.ucode_data);
kfree(img->base.sig);
kfree(img);
return ERR_PTR(ret);
}
+ img->func = func->version[ret];
+
/* Check that the signature size matches our expectations... */
if (img->base.sig_size != sizeof(img->lsb_header.signature)) {
nvkm_error(subdev, "invalid signature size for %s falcon!\n",
@@ -158,8 +164,7 @@ acr_r367_ls_img_fill_headers(struct acr_r352 *acr,
struct acr_r367_lsf_wpr_header *whdr = &img->wpr_header;
struct acr_r367_lsf_lsb_header *lhdr = &img->lsb_header;
struct ls_ucode_img_desc *desc = &_img->ucode_desc;
- const struct acr_r352_ls_func *func =
- acr->func->ls_func[_img->falcon_id];
+ const struct acr_r352_lsf_func *func = img->func;
/* Fill WPR header */
whdr->falcon_id = _img->falcon_id;
@@ -269,8 +274,8 @@ acr_r367_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs,
u8 *gdesc;
list_for_each_entry(_img, imgs, node) {
- const struct acr_r352_ls_func *ls_func =
- acr->func->ls_func[_img->falcon_id];
+ struct ls_ucode_img_r367 *img = ls_ucode_img_r367(_img);
+ const struct acr_r352_lsf_func *ls_func = img->func;
max_desc_size = max(max_desc_size, ls_func->bl_desc_size);
}
@@ -283,8 +288,7 @@ acr_r367_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs,
list_for_each_entry(_img, imgs, node) {
struct ls_ucode_img_r367 *img = ls_ucode_img_r367(_img);
- const struct acr_r352_ls_func *ls_func =
- acr->func->ls_func[_img->falcon_id];
+ const struct acr_r352_lsf_func *ls_func = img->func;
nvkm_gpuobj_memcpy_to(wpr_blob, pos, &img->wpr_header,
sizeof(img->wpr_header));
@@ -378,6 +382,17 @@ acr_r367_fixup_hs_desc(struct acr_r352 *acr, struct nvkm_secboot *sb,
}
}
+static const struct acr_r352_ls_func
+acr_r367_ls_sec2_func = {
+ .load = acr_ls_ucode_load_sec2,
+ .post_run = acr_ls_sec2_post_run,
+ .version_max = 1,
+ .version = {
+ &acr_r361_ls_sec2_func_0,
+ &acr_r370_ls_sec2_func_0,
+ }
+};
+
const struct acr_r352_func
acr_r367_func = {
.fixup_hs_desc = acr_r367_fixup_hs_desc,
@@ -391,7 +406,7 @@ acr_r367_func = {
[NVKM_SECBOOT_FALCON_FECS] = &acr_r361_ls_fecs_func,
[NVKM_SECBOOT_FALCON_GPCCS] = &acr_r361_ls_gpccs_func,
[NVKM_SECBOOT_FALCON_PMU] = &acr_r361_ls_pmu_func,
- [NVKM_SECBOOT_FALCON_SEC2] = &acr_r361_ls_sec2_func,
+ [NVKM_SECBOOT_FALCON_SEC2] = &acr_r367_ls_sec2_func,
},
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.c
index 2f890dfae7fc..e821d0fd6217 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.c
@@ -49,20 +49,36 @@ acr_r370_generate_flcn_bl_desc(const struct nvkm_acr *acr,
desc->data_size = pdesc->app_resident_data_size;
}
+static const struct acr_r352_lsf_func
+acr_r370_ls_fecs_func_0 = {
+ .generate_bl_desc = acr_r370_generate_flcn_bl_desc,
+ .bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
+};
+
const struct acr_r352_ls_func
acr_r370_ls_fecs_func = {
.load = acr_ls_ucode_load_fecs,
+ .version_max = 0,
+ .version = {
+ &acr_r370_ls_fecs_func_0,
+ }
+};
+
+static const struct acr_r352_lsf_func
+acr_r370_ls_gpccs_func_0 = {
.generate_bl_desc = acr_r370_generate_flcn_bl_desc,
.bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
+ /* GPCCS will be loaded using PRI */
+ .lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD,
};
const struct acr_r352_ls_func
acr_r370_ls_gpccs_func = {
.load = acr_ls_ucode_load_gpccs,
- .generate_bl_desc = acr_r370_generate_flcn_bl_desc,
- .bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
- /* GPCCS will be loaded using PRI */
- .lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD,
+ .version_max = 0,
+ .version = {
+ &acr_r370_ls_gpccs_func_0,
+ }
};
static void
@@ -95,12 +111,20 @@ acr_r370_generate_sec2_bl_desc(const struct nvkm_acr *acr,
desc->argv = 0x01000000;
}
+const struct acr_r352_lsf_func
+acr_r370_ls_sec2_func_0 = {
+ .generate_bl_desc = acr_r370_generate_sec2_bl_desc,
+ .bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
+};
+
const struct acr_r352_ls_func
acr_r370_ls_sec2_func = {
.load = acr_ls_ucode_load_sec2,
- .generate_bl_desc = acr_r370_generate_sec2_bl_desc,
- .bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
.post_run = acr_ls_sec2_post_run,
+ .version_max = 0,
+ .version = {
+ &acr_r370_ls_sec2_func_0,
+ }
};
void
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.h
index 3426f86a15e4..2efed6f995ad 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.h
@@ -46,4 +46,5 @@ struct acr_r370_flcn_bl_desc {
void acr_r370_generate_hs_bl_desc(const struct hsf_load_header *, void *, u64);
extern const struct acr_r352_ls_func acr_r370_ls_fecs_func;
extern const struct acr_r352_ls_func acr_r370_ls_gpccs_func;
+extern const struct acr_r352_lsf_func acr_r370_ls_sec2_func_0;
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r375.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r375.c
index 7bdef93cb7ae..8f0647766038 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r375.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r375.c
@@ -54,12 +54,20 @@ acr_r375_generate_pmu_bl_desc(const struct nvkm_acr *acr,
desc->argv = addr_args;
}
+static const struct acr_r352_lsf_func
+acr_r375_ls_pmu_func_0 = {
+ .generate_bl_desc = acr_r375_generate_pmu_bl_desc,
+ .bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
+};
+
const struct acr_r352_ls_func
acr_r375_ls_pmu_func = {
.load = acr_ls_ucode_load_pmu,
- .generate_bl_desc = acr_r375_generate_pmu_bl_desc,
- .bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
.post_run = acr_ls_pmu_post_run,
+ .version_max = 0,
+ .version = {
+ &acr_r375_ls_pmu_func_0,
+ }
};
const struct acr_r352_func
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode.h
index 9b7c402594e8..d43f906da3a7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode.h
@@ -147,11 +147,15 @@ struct fw_bl_desc {
u32 data_size;
};
-int acr_ls_ucode_load_fecs(const struct nvkm_secboot *, struct ls_ucode_img *);
-int acr_ls_ucode_load_gpccs(const struct nvkm_secboot *, struct ls_ucode_img *);
-int acr_ls_ucode_load_pmu(const struct nvkm_secboot *, struct ls_ucode_img *);
+int acr_ls_ucode_load_fecs(const struct nvkm_secboot *, int,
+ struct ls_ucode_img *);
+int acr_ls_ucode_load_gpccs(const struct nvkm_secboot *, int,
+ struct ls_ucode_img *);
+int acr_ls_ucode_load_pmu(const struct nvkm_secboot *, int,
+ struct ls_ucode_img *);
int acr_ls_pmu_post_run(const struct nvkm_acr *, const struct nvkm_secboot *);
-int acr_ls_ucode_load_sec2(const struct nvkm_secboot *, struct ls_ucode_img *);
+int acr_ls_ucode_load_sec2(const struct nvkm_secboot *, int,
+ struct ls_ucode_img *);
int acr_ls_sec2_post_run(const struct nvkm_acr *, const struct nvkm_secboot *);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c
index 1b0c793c0192..821d3b2bdb1f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c
@@ -90,30 +90,30 @@ ls_ucode_img_build(const struct firmware *bl, const struct firmware *code,
* blob. Also generate the corresponding ucode descriptor.
*/
static int
-ls_ucode_img_load_gr(const struct nvkm_subdev *subdev, struct ls_ucode_img *img,
- const char *falcon_name)
+ls_ucode_img_load_gr(const struct nvkm_subdev *subdev, int maxver,
+ struct ls_ucode_img *img, const char *falcon_name)
{
const struct firmware *bl, *code, *data, *sig;
char f[64];
int ret;
snprintf(f, sizeof(f), "gr/%s_bl", falcon_name);
- ret = nvkm_firmware_get(subdev->device, f, &bl);
+ ret = nvkm_firmware_get(subdev, f, &bl);
if (ret)
goto error;
snprintf(f, sizeof(f), "gr/%s_inst", falcon_name);
- ret = nvkm_firmware_get(subdev->device, f, &code);
+ ret = nvkm_firmware_get(subdev, f, &code);
if (ret)
goto free_bl;
snprintf(f, sizeof(f), "gr/%s_data", falcon_name);
- ret = nvkm_firmware_get(subdev->device, f, &data);
+ ret = nvkm_firmware_get(subdev, f, &data);
if (ret)
goto free_inst;
snprintf(f, sizeof(f), "gr/%s_sig", falcon_name);
- ret = nvkm_firmware_get(subdev->device, f, &sig);
+ ret = nvkm_firmware_get(subdev, f, &sig);
if (ret)
goto free_data;
@@ -146,13 +146,15 @@ error:
}
int
-acr_ls_ucode_load_fecs(const struct nvkm_secboot *sb, struct ls_ucode_img *img)
+acr_ls_ucode_load_fecs(const struct nvkm_secboot *sb, int maxver,
+ struct ls_ucode_img *img)
{
- return ls_ucode_img_load_gr(&sb->subdev, img, "fecs");
+ return ls_ucode_img_load_gr(&sb->subdev, maxver, img, "fecs");
}
int
-acr_ls_ucode_load_gpccs(const struct nvkm_secboot *sb, struct ls_ucode_img *img)
+acr_ls_ucode_load_gpccs(const struct nvkm_secboot *sb, int maxver,
+ struct ls_ucode_img *img)
{
- return ls_ucode_img_load_gr(&sb->subdev, img, "gpccs");
+ return ls_ucode_img_load_gr(&sb->subdev, maxver, img, "gpccs");
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c
index 1e1f1c635cab..77c13b096a67 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c
@@ -39,32 +39,32 @@
*/
static int
acr_ls_ucode_load_msgqueue(const struct nvkm_subdev *subdev, const char *name,
- struct ls_ucode_img *img)
+ int maxver, struct ls_ucode_img *img)
{
const struct firmware *image, *desc, *sig;
char f[64];
- int ret;
+ int ver, ret;
snprintf(f, sizeof(f), "%s/image", name);
- ret = nvkm_firmware_get(subdev->device, f, &image);
- if (ret)
- return ret;
+ ver = nvkm_firmware_get_version(subdev, f, 0, maxver, &image);
+ if (ver < 0)
+ return ver;
img->ucode_data = kmemdup(image->data, image->size, GFP_KERNEL);
nvkm_firmware_put(image);
if (!img->ucode_data)
return -ENOMEM;
snprintf(f, sizeof(f), "%s/desc", name);
- ret = nvkm_firmware_get(subdev->device, f, &desc);
- if (ret)
+ ret = nvkm_firmware_get_version(subdev, f, ver, ver, &desc);
+ if (ret < 0)
return ret;
memcpy(&img->ucode_desc, desc->data, sizeof(img->ucode_desc));
img->ucode_size = ALIGN(img->ucode_desc.app_start_offset + img->ucode_desc.app_size, 256);
nvkm_firmware_put(desc);
snprintf(f, sizeof(f), "%s/sig", name);
- ret = nvkm_firmware_get(subdev->device, f, &sig);
- if (ret)
+ ret = nvkm_firmware_get_version(subdev, f, ver, ver, &sig);
+ if (ret < 0)
return ret;
img->sig_size = sig->size;
img->sig = kmemdup(sig->data, sig->size, GFP_KERNEL);
@@ -72,7 +72,7 @@ acr_ls_ucode_load_msgqueue(const struct nvkm_subdev *subdev, const char *name,
if (!img->sig)
return -ENOMEM;
- return 0;
+ return ver;
}
static int
@@ -99,12 +99,13 @@ acr_ls_msgqueue_post_run(struct nvkm_msgqueue *queue,
}
int
-acr_ls_ucode_load_pmu(const struct nvkm_secboot *sb, struct ls_ucode_img *img)
+acr_ls_ucode_load_pmu(const struct nvkm_secboot *sb, int maxver,
+ struct ls_ucode_img *img)
{
struct nvkm_pmu *pmu = sb->subdev.device->pmu;
int ret;
- ret = acr_ls_ucode_load_msgqueue(&sb->subdev, "pmu", img);
+ ret = acr_ls_ucode_load_msgqueue(&sb->subdev, "pmu", maxver, img);
if (ret)
return ret;
@@ -136,14 +137,15 @@ acr_ls_pmu_post_run(const struct nvkm_acr *acr, const struct nvkm_secboot *sb)
}
int
-acr_ls_ucode_load_sec2(const struct nvkm_secboot *sb, struct ls_ucode_img *img)
+acr_ls_ucode_load_sec2(const struct nvkm_secboot *sb, int maxver,
+ struct ls_ucode_img *img)
{
struct nvkm_sec2 *sec = sb->subdev.device->sec2;
- int ret;
+ int ver, ret;
- ret = acr_ls_ucode_load_msgqueue(&sb->subdev, "sec2", img);
- if (ret)
- return ret;
+ ver = acr_ls_ucode_load_msgqueue(&sb->subdev, "sec2", maxver, img);
+ if (ver < 0)
+ return ver;
/* Allocate the PMU queue corresponding to the FW version */
ret = nvkm_msgqueue_new(img->ucode_desc.app_version, sec->falcon,
@@ -151,7 +153,7 @@ acr_ls_ucode_load_sec2(const struct nvkm_secboot *sb, struct ls_ucode_img *img)
if (ret)
return ret;
- return 0;
+ return ver;
}
int
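Note how acr_ls_ucode_load_msgqueue() above uses the version returned for the image file to request /desc and /sig with min == max == ver, so a mismatched set of firmware files fails to load instead of loading silently. Building on the firmware_lookup() sketch shown after the core/firmware.c diff, the pattern is roughly (names illustrative):

/*
 * firmware_lookup() as sketched after the core/firmware.c diff above;
 * returns the version found in [minver, maxver], or -1 on failure.
 */
int firmware_lookup(const char *dir, const char *name, int minver, int maxver);

/* Pin desc and sig to the version the image was found at. */
int load_msgqueue_set(const char *dir, int maxver)
{
	int ver, ret;

	ver = firmware_lookup(dir, "image", 0, maxver);
	if (ver < 0)
		return ver;

	ret = firmware_lookup(dir, "desc", ver, ver);	/* same version only */
	if (ret < 0)
		return ret;

	ret = firmware_lookup(dir, "sig", ver, ver);
	if (ret < 0)
		return ret;

	return ver;
}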
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index 447e96f9d259..12ed5265a90b 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -916,29 +916,17 @@ static void vop_plane_atomic_async_update(struct drm_plane *plane,
struct drm_plane_state *new_state)
{
struct vop *vop = to_vop(plane->state->crtc);
- struct drm_plane_state *plane_state;
-
- plane_state = plane->funcs->atomic_duplicate_state(plane);
- plane_state->crtc_x = new_state->crtc_x;
- plane_state->crtc_y = new_state->crtc_y;
- plane_state->crtc_h = new_state->crtc_h;
- plane_state->crtc_w = new_state->crtc_w;
- plane_state->src_x = new_state->src_x;
- plane_state->src_y = new_state->src_y;
- plane_state->src_h = new_state->src_h;
- plane_state->src_w = new_state->src_w;
-
- if (plane_state->fb != new_state->fb)
- drm_atomic_set_fb_for_plane(plane_state, new_state->fb);
-
- swap(plane_state, plane->state);
-
- if (plane->state->fb && plane->state->fb != new_state->fb) {
- drm_framebuffer_get(plane->state->fb);
- WARN_ON(drm_crtc_vblank_get(plane->state->crtc) != 0);
- drm_flip_work_queue(&vop->fb_unref_work, plane->state->fb);
- set_bit(VOP_PENDING_FB_UNREF, &vop->pending);
- }
+ struct drm_framebuffer *old_fb = plane->state->fb;
+
+ plane->state->crtc_x = new_state->crtc_x;
+ plane->state->crtc_y = new_state->crtc_y;
+ plane->state->crtc_h = new_state->crtc_h;
+ plane->state->crtc_w = new_state->crtc_w;
+ plane->state->src_x = new_state->src_x;
+ plane->state->src_y = new_state->src_y;
+ plane->state->src_h = new_state->src_h;
+ plane->state->src_w = new_state->src_w;
+ swap(plane->state->fb, new_state->fb);
if (vop->is_enabled) {
rockchip_drm_psr_inhibit_get_state(new_state->state);
@@ -947,9 +935,22 @@ static void vop_plane_atomic_async_update(struct drm_plane *plane,
vop_cfg_done(vop);
spin_unlock(&vop->reg_lock);
rockchip_drm_psr_inhibit_put_state(new_state->state);
- }
- plane->funcs->atomic_destroy_state(plane, plane_state);
+ /*
+ * A scanout can still be occurring, so we can't drop the
+ * reference to the old framebuffer. To solve this we get a
+ * reference to old_fb and set a worker to release it later.
+ * FIXME: if we perform 500 async_update calls before the
+ * vblank, then we can have 500 different framebuffers waiting
+ * to be released.
+ */
+ if (old_fb && plane->state->fb != old_fb) {
+ drm_framebuffer_get(old_fb);
+ WARN_ON(drm_crtc_vblank_get(plane->state->crtc) != 0);
+ drm_flip_work_queue(&vop->fb_unref_work, old_fb);
+ set_bit(VOP_PENDING_FB_UNREF, &vop->pending);
+ }
+ }
}
static const struct drm_plane_helper_funcs plane_helper_funcs = {
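The rockchip change above, together with the vc4 and mdp5 hunks, converges on one pattern for async plane updates: update the existing plane state in place and swap the framebuffer pointers rather than duplicating the whole state, so the old fb ends up in new_state and is released on the path that already owns new_state's references (rockchip additionally defers the unref to flip work because scanout may still be reading the old fb). A minimal sketch of the swap trick with a toy refcounted fb type:

#include <stdio.h>

struct fb {
	const char *name;
	int refs;
};

struct pstate {
	struct fb *fb;
};

/*
 * Swap instead of copy: after the update the old fb sits in new_state,
 * whose teardown path already knows how to drop one reference.
 */
static void async_update(struct pstate *cur, struct pstate *new_state)
{
	struct fb *tmp = cur->fb;

	cur->fb = new_state->fb;
	new_state->fb = tmp;
}

int main(void)
{
	struct fb old_fb = { "old", 1 }, new_fb = { "new", 1 };
	struct pstate cur = { &old_fb }, next = { &new_fb };

	async_update(&cur, &next);
	next.fb->refs--;	/* new_state teardown releases "old" */
	printf("scanning out \"%s\", \"%s\" refs=%d\n",
	       cur.fb->name, next.fb->name, next.fb->refs);
	return 0;
}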
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index 4d918d3e4858..afc80b245ea3 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -1025,7 +1025,7 @@ static void vc4_plane_atomic_async_update(struct drm_plane *plane,
{
struct vc4_plane_state *vc4_state, *new_vc4_state;
- drm_atomic_set_fb_for_plane(plane->state, state->fb);
+ swap(plane->state->fb, state->fb);
plane->state->crtc_x = state->crtc_x;
plane->state->crtc_y = state->crtc_y;
plane->state->crtc_w = state->crtc_w;
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
index 35d58736a3ed..05e120e01cb4 100644
--- a/drivers/hwmon/hwmon.c
+++ b/drivers/hwmon/hwmon.c
@@ -633,7 +633,7 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
if (err)
goto free_hwmon;
- if (dev && chip && chip->ops->read &&
+ if (dev && dev->of_node && chip && chip->ops->read &&
chip->info[0]->type == hwmon_chip &&
(chip->info[0]->config[0] & HWMON_C_REGISTER_TZ)) {
const struct hwmon_channel_info **info = chip->info;
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index ef7ee90ee785..8470097907bc 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -1217,7 +1217,8 @@ static int pmbus_add_sensor_attrs_one(struct i2c_client *client,
const struct pmbus_driver_info *info,
const char *name,
int index, int page,
- const struct pmbus_sensor_attr *attr)
+ const struct pmbus_sensor_attr *attr,
+ bool paged)
{
struct pmbus_sensor *base;
bool upper = !!(attr->gbit & 0xff00); /* need to check STATUS_WORD */
@@ -1225,7 +1226,7 @@ static int pmbus_add_sensor_attrs_one(struct i2c_client *client,
if (attr->label) {
ret = pmbus_add_label(data, name, index, attr->label,
- attr->paged ? page + 1 : 0);
+ paged ? page + 1 : 0);
if (ret)
return ret;
}
@@ -1258,6 +1259,30 @@ static int pmbus_add_sensor_attrs_one(struct i2c_client *client,
return 0;
}
+static bool pmbus_sensor_is_paged(const struct pmbus_driver_info *info,
+ const struct pmbus_sensor_attr *attr)
+{
+ int p;
+
+ if (attr->paged)
+ return true;
+
+ /*
+ * Some attributes may be present on more than one page despite
+ * not being marked with the paged attribute. If that is the case,
+ * then treat the sensor as being paged and add the page suffix to the
+ * attribute name.
+ * We don't just add the paged attribute to all such attributes, in
+ * order to maintain the un-suffixed labels in the case where the
+ * attribute is only on page 0.
+ */
+ for (p = 1; p < info->pages; p++) {
+ if (info->func[p] & attr->func)
+ return true;
+ }
+ return false;
+}
+
static int pmbus_add_sensor_attrs(struct i2c_client *client,
struct pmbus_data *data,
const char *name,
@@ -1271,14 +1296,15 @@ static int pmbus_add_sensor_attrs(struct i2c_client *client,
index = 1;
for (i = 0; i < nattrs; i++) {
int page, pages;
+ bool paged = pmbus_sensor_is_paged(info, attrs);
- pages = attrs->paged ? info->pages : 1;
+ pages = paged ? info->pages : 1;
for (page = 0; page < pages; page++) {
if (!(info->func[page] & attrs->func))
continue;
ret = pmbus_add_sensor_attrs_one(client, data, info,
name, index, page,
- attrs);
+ attrs, paged);
if (ret)
return ret;
index++;
@@ -1942,11 +1968,14 @@ static ssize_t pmbus_set_samples(struct device *dev,
long val;
struct i2c_client *client = to_i2c_client(dev->parent);
struct pmbus_samples_reg *reg = to_samples_reg(devattr);
+ struct pmbus_data *data = i2c_get_clientdata(client);
if (kstrtol(buf, 0, &val) < 0)
return -EINVAL;
+ mutex_lock(&data->update_lock);
ret = _pmbus_write_word_data(client, reg->page, reg->attr->reg, val);
+ mutex_unlock(&data->update_lock);
return ret ? : count;
}
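pmbus_sensor_is_paged() above is the heart of the fix: an attribute is treated as paged not only when the table marks it so, but also when any page past 0 advertises the same functionality bit, which keeps un-suffixed labels for sensors that genuinely exist on page 0 only. Reduced to its bitmask scan, the check looks like this (functionality bit and page table are illustrative):

#include <stdbool.h>
#include <stdio.h>

#define FUNC_TEMP	(1u << 3)	/* illustrative functionality bit */

/*
 * Paged if marked paged, or if any page past 0 also advertises the
 * functionality the attribute depends on.
 */
static bool sensor_is_paged(const unsigned int *func, int pages,
			    unsigned int attr_func, bool marked_paged)
{
	int p;

	if (marked_paged)
		return true;
	for (p = 1; p < pages; p++) {
		if (func[p] & attr_func)
			return true;
	}
	return false;
}

int main(void)
{
	unsigned int func[] = { FUNC_TEMP, FUNC_TEMP }; /* pages 0 and 1 */

	printf("paged: %d\n", sensor_is_paged(func, 2, FUNC_TEMP, false));
	return 0;
}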
diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
index 0fea7c54f788..37b3b9307d07 100644
--- a/drivers/i2c/busses/i2c-xiic.c
+++ b/drivers/i2c/busses/i2c-xiic.c
@@ -709,11 +709,16 @@ static const struct i2c_algorithm xiic_algorithm = {
.functionality = xiic_func,
};
+static const struct i2c_adapter_quirks xiic_quirks = {
+ .max_read_len = 255,
+};
+
static const struct i2c_adapter xiic_adapter = {
.owner = THIS_MODULE,
.name = DRIVER_NAME,
.class = I2C_CLASS_DEPRECATED,
.algo = &xiic_algorithm,
+ .quirks = &xiic_quirks,
};
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 78dc07c6ac4b..29f7b15c81d9 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -409,27 +409,44 @@ static int rename_compat_devs(struct ib_device *device)
int ib_device_rename(struct ib_device *ibdev, const char *name)
{
+ unsigned long index;
+ void *client_data;
int ret;
down_write(&devices_rwsem);
if (!strcmp(name, dev_name(&ibdev->dev))) {
- ret = 0;
- goto out;
+ up_write(&devices_rwsem);
+ return 0;
}
if (__ib_device_get_by_name(name)) {
- ret = -EEXIST;
- goto out;
+ up_write(&devices_rwsem);
+ return -EEXIST;
}
ret = device_rename(&ibdev->dev, name);
- if (ret)
- goto out;
+ if (ret) {
+ up_write(&devices_rwsem);
+ return ret;
+ }
+
strlcpy(ibdev->name, name, IB_DEVICE_NAME_MAX);
ret = rename_compat_devs(ibdev);
-out:
- up_write(&devices_rwsem);
- return ret;
+
+ downgrade_write(&devices_rwsem);
+ down_read(&ibdev->client_data_rwsem);
+ xan_for_each_marked(&ibdev->client_data, index, client_data,
+ CLIENT_DATA_REGISTERED) {
+ struct ib_client *client = xa_load(&clients, index);
+
+ if (!client || !client->rename)
+ continue;
+
+ client->rename(ibdev, client_data);
+ }
+ up_read(&ibdev->client_data_rwsem);
+ up_read(&devices_rwsem);
+ return 0;
}
static int alloc_name(struct ib_device *ibdev, const char *name)
@@ -474,14 +491,15 @@ static void ib_device_release(struct device *device)
free_netdevs(dev);
WARN_ON(refcount_read(&dev->refcount));
- ib_cache_release_one(dev);
- ib_security_release_port_pkey_list(dev);
- xa_destroy(&dev->compat_devs);
- xa_destroy(&dev->client_data);
- if (dev->port_data)
+ if (dev->port_data) {
+ ib_cache_release_one(dev);
+ ib_security_release_port_pkey_list(dev);
kfree_rcu(container_of(dev->port_data, struct ib_port_data_rcu,
pdata[0]),
rcu_head);
+ }
+ xa_destroy(&dev->compat_devs);
+ xa_destroy(&dev->client_data);
kfree_rcu(dev, rcu_head);
}
@@ -1935,6 +1953,9 @@ static void free_netdevs(struct ib_device *ib_dev)
unsigned long flags;
unsigned int port;
+ if (!ib_dev->port_data)
+ return;
+
rdma_for_each_port (ib_dev, port) {
struct ib_port_data *pdata = &ib_dev->port_data[port];
struct net_device *ndev;
diff --git a/drivers/infiniband/core/rdma_core.h b/drivers/infiniband/core/rdma_core.h
index 5445323629b5..e63fbda25e1d 100644
--- a/drivers/infiniband/core/rdma_core.h
+++ b/drivers/infiniband/core/rdma_core.h
@@ -110,6 +110,8 @@ int uverbs_output_written(const struct uverbs_attr_bundle *bundle, size_t idx);
void setup_ufile_idr_uobject(struct ib_uverbs_file *ufile);
void release_ufile_idr_uobject(struct ib_uverbs_file *ufile);
+struct ib_udata *uverbs_get_cleared_udata(struct uverbs_attr_bundle *attrs);
+
/*
* This is the runtime description of the uverbs API, used by the syscall
* machinery to validate and dispatch calls.
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 5a3a1780ceea..63fe14c7c68f 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -174,6 +174,17 @@ static int uverbs_request_finish(struct uverbs_req_iter *iter)
return 0;
}
+/*
+ * When calling a destroy function during an error unwind we need to pass in
+ * the udata that is sanitized of all user arguments. Ie from the driver
+ * perspective it looks like no udata was passed.
+ */
+struct ib_udata *uverbs_get_cleared_udata(struct uverbs_attr_bundle *attrs)
+{
+ attrs->driver_udata = (struct ib_udata){};
+ return &attrs->driver_udata;
+}
+
static struct ib_uverbs_completion_event_file *
_ib_uverbs_lookup_comp_file(s32 fd, struct uverbs_attr_bundle *attrs)
{
@@ -441,7 +452,7 @@ static int ib_uverbs_alloc_pd(struct uverbs_attr_bundle *attrs)
return uobj_alloc_commit(uobj, attrs);
err_copy:
- ib_dealloc_pd_user(pd, &attrs->driver_udata);
+ ib_dealloc_pd_user(pd, uverbs_get_cleared_udata(attrs));
pd = NULL;
err_alloc:
kfree(pd);
@@ -644,7 +655,7 @@ err_copy:
}
err_dealloc_xrcd:
- ib_dealloc_xrcd(xrcd, &attrs->driver_udata);
+ ib_dealloc_xrcd(xrcd, uverbs_get_cleared_udata(attrs));
err:
uobj_alloc_abort(&obj->uobject, attrs);
@@ -767,7 +778,7 @@ static int ib_uverbs_reg_mr(struct uverbs_attr_bundle *attrs)
return uobj_alloc_commit(uobj, attrs);
err_copy:
- ib_dereg_mr_user(mr, &attrs->driver_udata);
+ ib_dereg_mr_user(mr, uverbs_get_cleared_udata(attrs));
err_put:
uobj_put_obj_read(pd);
@@ -1042,7 +1053,7 @@ static struct ib_ucq_object *create_cq(struct uverbs_attr_bundle *attrs,
return obj;
err_cb:
- ib_destroy_cq(cq);
+ ib_destroy_cq_user(cq, uverbs_get_cleared_udata(attrs));
err_file:
if (ev_file)
@@ -1478,7 +1489,7 @@ static int create_qp(struct uverbs_attr_bundle *attrs,
return uobj_alloc_commit(&obj->uevent.uobject, attrs);
err_cb:
- ib_destroy_qp(qp);
+ ib_destroy_qp_user(qp, uverbs_get_cleared_udata(attrs));
err_put:
if (!IS_ERR(xrcd_uobj))
@@ -1611,7 +1622,7 @@ static int ib_uverbs_open_qp(struct uverbs_attr_bundle *attrs)
return uobj_alloc_commit(&obj->uevent.uobject, attrs);
err_destroy:
- ib_destroy_qp(qp);
+ ib_destroy_qp_user(qp, uverbs_get_cleared_udata(attrs));
err_xrcd:
uobj_put_read(xrcd_uobj);
err_put:
@@ -2453,7 +2464,8 @@ static int ib_uverbs_create_ah(struct uverbs_attr_bundle *attrs)
return uobj_alloc_commit(uobj, attrs);
err_copy:
- rdma_destroy_ah(ah, RDMA_DESTROY_AH_SLEEPABLE);
+ rdma_destroy_ah_user(ah, RDMA_DESTROY_AH_SLEEPABLE,
+ uverbs_get_cleared_udata(attrs));
err_put:
uobj_put_obj_read(pd);
@@ -2964,7 +2976,7 @@ static int ib_uverbs_ex_create_wq(struct uverbs_attr_bundle *attrs)
return uobj_alloc_commit(&obj->uevent.uobject, attrs);
err_copy:
- ib_destroy_wq(wq, &attrs->driver_udata);
+ ib_destroy_wq(wq, uverbs_get_cleared_udata(attrs));
err_put_cq:
uobj_put_obj_read(cq);
err_put_pd:
@@ -3464,7 +3476,7 @@ static int __uverbs_create_xsrq(struct uverbs_attr_bundle *attrs,
return uobj_alloc_commit(&obj->uevent.uobject, attrs);
err_copy:
- ib_destroy_srq_user(srq, &attrs->driver_udata);
+ ib_destroy_srq_user(srq, uverbs_get_cleared_udata(attrs));
err_free:
kfree(srq);
diff --git a/drivers/infiniband/core/uverbs_std_types_cq.c b/drivers/infiniband/core/uverbs_std_types_cq.c
index db5c46a1bb2d..07ea4e3c4566 100644
--- a/drivers/infiniband/core/uverbs_std_types_cq.c
+++ b/drivers/infiniband/core/uverbs_std_types_cq.c
@@ -135,7 +135,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_CQ_CREATE)(
return 0;
err_cq:
- ib_destroy_cq(cq);
+ ib_destroy_cq_user(cq, uverbs_get_cleared_udata(attrs));
err_event_file:
if (ev_file)
diff --git a/drivers/infiniband/core/uverbs_std_types_mr.c b/drivers/infiniband/core/uverbs_std_types_mr.c
index 610d3b9f7654..997f7a3a558a 100644
--- a/drivers/infiniband/core/uverbs_std_types_mr.c
+++ b/drivers/infiniband/core/uverbs_std_types_mr.c
@@ -148,7 +148,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_DM_MR_REG)(
return 0;
err_dereg:
- ib_dereg_mr_user(mr, &attrs->driver_udata);
+ ib_dereg_mr_user(mr, uverbs_get_cleared_udata(attrs));
return ret;
}
diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c
index 6d6886c9009f..0fea5d63fdbe 100644
--- a/drivers/infiniband/hw/efa/efa_verbs.c
+++ b/drivers/infiniband/hw/efa/efa_verbs.c
@@ -1728,7 +1728,6 @@ int efa_mmap(struct ib_ucontext *ibucontext,
ibdev_dbg(&dev->ibdev, "Mapping executable pages is not permitted\n");
return -EPERM;
}
- vma->vm_flags &= ~VM_MAYEXEC;
return __efa_mmap(dev, ucontext, vma, key, length);
}
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index 310105d4e3de..4221a99ee7f4 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -9850,6 +9850,7 @@ void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
/* disable the port */
clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
+ cancel_work_sync(&ppd->freeze_work);
}
static inline int init_cpu_counters(struct hfi1_devdata *dd)
diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.c b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
index 0cd71ce7cc71..3592a9ec155e 100644
--- a/drivers/infiniband/hw/hfi1/user_exp_rcv.c
+++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
@@ -324,6 +324,9 @@ int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd,
u32 *tidlist = NULL;
struct tid_user_buf *tidbuf;
+ if (!PAGE_ALIGNED(tinfo->vaddr))
+ return -EINVAL;
+
tidbuf = kzalloc(sizeof(*tidbuf), GFP_KERNEL);
if (!tidbuf)
return -ENOMEM;
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index 1eb4105b2d22..a2b26a635baf 100644
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -1356,8 +1356,6 @@ static void hfi1_fill_device_attr(struct hfi1_devdata *dd)
rdi->dparms.props.max_cq = hfi1_max_cqs;
rdi->dparms.props.max_ah = hfi1_max_ahs;
rdi->dparms.props.max_cqe = hfi1_max_cqes;
- rdi->dparms.props.max_mr = rdi->lkey_table.max;
- rdi->dparms.props.max_fmr = rdi->lkey_table.max;
rdi->dparms.props.max_map_per_fmr = 32767;
rdi->dparms.props.max_pd = hfi1_max_pds;
rdi->dparms.props.max_qp_rd_atom = HFI1_MAX_RDMA_ATOMIC;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index 4c5d0f160c10..e068a02122f5 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -899,6 +899,7 @@ static void hns_roce_v1_release_lp_qp(struct hns_roce_dev *hr_dev)
dev_err(dev, "Destroy cq for mr_free failed(%d)!\n", ret);
hns_roce_dealloc_pd(&free_mr->mr_free_pd->ibpd, NULL);
+ kfree(&free_mr->mr_free_pd->ibpd);
}
static int hns_roce_db_init(struct hns_roce_dev *hr_dev)
diff --git a/drivers/infiniband/hw/mlx5/cmd.c b/drivers/infiniband/hw/mlx5/cmd.c
index e3ec79b8f7f5..6c8645033102 100644
--- a/drivers/infiniband/hw/mlx5/cmd.c
+++ b/drivers/infiniband/hw/mlx5/cmd.c
@@ -190,12 +190,12 @@ int mlx5_cmd_alloc_sw_icm(struct mlx5_dm *dm, int type, u64 length,
u16 uid, phys_addr_t *addr, u32 *obj_id)
{
struct mlx5_core_dev *dev = dm->dev;
- u32 num_blocks = DIV_ROUND_UP(length, MLX5_SW_ICM_BLOCK_SIZE(dev));
u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
u32 in[MLX5_ST_SZ_DW(create_sw_icm_in)] = {};
unsigned long *block_map;
u64 icm_start_addr;
u32 log_icm_size;
+ u32 num_blocks;
u32 max_blocks;
u64 block_idx;
void *sw_icm;
@@ -224,6 +224,8 @@ int mlx5_cmd_alloc_sw_icm(struct mlx5_dm *dm, int type, u64 length,
return -EINVAL;
}
+ num_blocks = (length + MLX5_SW_ICM_BLOCK_SIZE(dev) - 1) >>
+ MLX5_LOG_SW_ICM_BLOCK_SIZE(dev);
max_blocks = BIT(log_icm_size - MLX5_LOG_SW_ICM_BLOCK_SIZE(dev));
spin_lock(&dm->lock);
block_idx = bitmap_find_next_zero_area(block_map,
@@ -266,13 +268,16 @@ int mlx5_cmd_dealloc_sw_icm(struct mlx5_dm *dm, int type, u64 length,
u16 uid, phys_addr_t addr, u32 obj_id)
{
struct mlx5_core_dev *dev = dm->dev;
- u32 num_blocks = DIV_ROUND_UP(length, MLX5_SW_ICM_BLOCK_SIZE(dev));
u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
unsigned long *block_map;
+ u32 num_blocks;
u64 start_idx;
int err;
+ num_blocks = (length + MLX5_SW_ICM_BLOCK_SIZE(dev) - 1) >>
+ MLX5_LOG_SW_ICM_BLOCK_SIZE(dev);
+
switch (type) {
case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
start_idx =
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index abac70ad5c7c..340290b883fe 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -2344,7 +2344,7 @@ static int handle_alloc_dm_sw_icm(struct ib_ucontext *ctx,
/* Allocation size must be a multiple of the basic block size
* and a power of 2.
*/
- act_size = roundup(attr->length, MLX5_SW_ICM_BLOCK_SIZE(dm_db->dev));
+ act_size = round_up(attr->length, MLX5_SW_ICM_BLOCK_SIZE(dm_db->dev));
act_size = roundup_pow_of_two(act_size);
dm->size = act_size;
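Two related details in the mlx5 hunks deserve spelling out. In cmd.c, DIV_ROUND_UP() on the u64 length with a runtime divisor compiles to a 64-bit division, which is presumably why it was replaced by an add-and-shift on the log block size; in main.c, roundup() (division based) becomes round_up(), the kernel's mask-based variant that requires a power-of-two alignment, which the block size is. A standalone sketch of both forms, assuming a power-of-two block size:

#include <stdint.h>
#include <stdio.h>

/* Mask-based rounding, valid only for power-of-two alignments. */
static uint64_t round_up_pow2(uint64_t x, uint64_t align)
{
	return (x + align - 1) & ~(align - 1);
}

/* Block count via shift: no 64-bit division is emitted. */
static uint64_t num_blocks(uint64_t len, unsigned int log_block)
{
	return (len + (1ULL << log_block) - 1) >> log_block;
}

int main(void)
{
	unsigned int log_block = 12;	/* illustrative 4 KiB blocks */

	printf("size  = %llu\n", (unsigned long long)
	       round_up_pow2(10000, 1ULL << log_block));
	printf("nblks = %llu\n", (unsigned long long)
	       num_blocks(10000, log_block));
	return 0;
}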
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index 5ff32d32c61c..2c4e569ce438 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -1459,8 +1459,6 @@ static void qib_fill_device_attr(struct qib_devdata *dd)
rdi->dparms.props.max_cq = ib_qib_max_cqs;
rdi->dparms.props.max_cqe = ib_qib_max_cqes;
rdi->dparms.props.max_ah = ib_qib_max_ahs;
- rdi->dparms.props.max_mr = rdi->lkey_table.max;
- rdi->dparms.props.max_fmr = rdi->lkey_table.max;
rdi->dparms.props.max_map_per_fmr = 32767;
rdi->dparms.props.max_qp_rd_atom = QIB_MAX_RDMA_ATOMIC;
rdi->dparms.props.max_qp_init_rd_atom = 255;
diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c
index 54f3f9c27552..f48240f66b8f 100644
--- a/drivers/infiniband/sw/rdmavt/mr.c
+++ b/drivers/infiniband/sw/rdmavt/mr.c
@@ -96,6 +96,8 @@ int rvt_driver_mr_init(struct rvt_dev_info *rdi)
for (i = 0; i < rdi->lkey_table.max; i++)
RCU_INIT_POINTER(rdi->lkey_table.table[i], NULL);
+ rdi->dparms.props.max_mr = rdi->lkey_table.max;
+ rdi->dparms.props.max_fmr = rdi->lkey_table.max;
return 0;
}
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index 31a2e65e4906..c5a50614a6c6 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -594,7 +594,8 @@ static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
offset = qpt->incr | ((offset & 1) ^ 1);
}
/* there can be no set bits in low-order QoS bits */
- WARN_ON(offset & (BIT(rdi->dparms.qos_shift) - 1));
+ WARN_ON(rdi->dparms.qos_shift > 1 &&
+ offset & ((BIT(rdi->dparms.qos_shift - 1) - 1) << 1));
qpn = mk_qpn(qpt, map, offset);
}
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index be9ddcad8f28..4305da2c9037 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -148,6 +148,7 @@ MODULE_PARM_DESC(ch_count,
static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device, void *client_data);
+static void srp_rename_dev(struct ib_device *device, void *client_data);
static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
const char *opname);
@@ -162,7 +163,8 @@ static struct workqueue_struct *srp_remove_wq;
static struct ib_client srp_client = {
.name = "srp",
.add = srp_add_one,
- .remove = srp_remove_one
+ .remove = srp_remove_one,
+ .rename = srp_rename_dev
};
static struct ib_sa_client srp_sa_client;
@@ -4112,6 +4114,20 @@ free_host:
return NULL;
}
+static void srp_rename_dev(struct ib_device *device, void *client_data)
+{
+ struct srp_device *srp_dev = client_data;
+ struct srp_host *host, *tmp_host;
+
+ list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
+ char name[IB_DEVICE_NAME_MAX + 8];
+
+ snprintf(name, sizeof(name), "srp-%s-%d",
+ dev_name(&device->dev), host->port);
+ device_rename(&host->dev, name);
+ }
+}
+
static void srp_add_one(struct ib_device *device)
{
struct srp_device *srp_dev;
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
index aba50ec98b4d..9545e87b6085 100644
--- a/drivers/memstick/core/mspro_block.c
+++ b/drivers/memstick/core/mspro_block.c
@@ -694,13 +694,13 @@ static void h_mspro_block_setup_cmd(struct memstick_dev *card, u64 offset,
/*** Data transfer ***/
-static int mspro_block_issue_req(struct memstick_dev *card, bool chunk)
+static int mspro_block_issue_req(struct memstick_dev *card)
{
struct mspro_block_data *msb = memstick_get_drvdata(card);
u64 t_off;
unsigned int count;
- while (chunk) {
+ while (true) {
msb->current_page = 0;
msb->current_seg = 0;
msb->seg_count = blk_rq_map_sg(msb->block_req->q,
@@ -709,6 +709,7 @@ static int mspro_block_issue_req(struct memstick_dev *card, bool chunk)
if (!msb->seg_count) {
unsigned int bytes = blk_rq_cur_bytes(msb->block_req);
+ bool chunk;
chunk = blk_update_request(msb->block_req,
BLK_STS_RESOURCE,
@@ -718,7 +719,7 @@ static int mspro_block_issue_req(struct memstick_dev *card, bool chunk)
__blk_mq_end_request(msb->block_req,
BLK_STS_RESOURCE);
msb->block_req = NULL;
- break;
+ return -EAGAIN;
}
t_off = blk_rq_pos(msb->block_req);
@@ -735,8 +736,6 @@ static int mspro_block_issue_req(struct memstick_dev *card, bool chunk)
memstick_new_req(card->host);
return 0;
}
-
- return 1;
}
static int mspro_block_complete_req(struct memstick_dev *card, int error)
@@ -779,7 +778,7 @@ static int mspro_block_complete_req(struct memstick_dev *card, int error)
chunk = blk_update_request(msb->block_req,
errno_to_blk_status(error), t_len);
if (chunk) {
- error = mspro_block_issue_req(card, chunk);
+ error = mspro_block_issue_req(card);
if (!error)
goto out;
} else {
@@ -849,7 +848,7 @@ static blk_status_t mspro_queue_rq(struct blk_mq_hw_ctx *hctx,
msb->block_req = bd->rq;
blk_mq_start_request(bd->rq);
- if (mspro_block_issue_req(card, true))
+ if (mspro_block_issue_req(card))
msb->block_req = NULL;
spin_unlock_irq(&msb->q_lock);
diff --git a/drivers/misc/genwqe/card_dev.c b/drivers/misc/genwqe/card_dev.c
index 7618b65aab34..3bc51f19c734 100644
--- a/drivers/misc/genwqe/card_dev.c
+++ b/drivers/misc/genwqe/card_dev.c
@@ -772,6 +772,8 @@ static int genwqe_pin_mem(struct genwqe_file *cfile, struct genwqe_mem *m)
if ((m->addr == 0x0) || (m->size == 0))
return -EINVAL;
+ if (m->size > ULONG_MAX - PAGE_SIZE - (m->addr & ~PAGE_MASK))
+ return -EINVAL;
map_addr = (m->addr & PAGE_MASK);
map_size = round_up(m->size + (m->addr & ~PAGE_MASK), PAGE_SIZE);
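The new guard above (repeated in card_utils.c below) rejects user sizes for which rounding the mapping up to a page boundary would wrap an unsigned long. A standalone sketch of the check, using an illustrative 4 KiB page size:

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE	4096UL		/* illustrative */
#define PAGE_MASK	(~(PAGE_SIZE - 1))

/*
 * Reject sizes whose page-rounded mapping length could wrap an
 * unsigned long, mirroring the guards added above and below.
 */
static bool map_size_ok(unsigned long addr, unsigned long size)
{
	unsigned long offs = addr & ~PAGE_MASK;

	return size <= ULONG_MAX - PAGE_SIZE - offs;
}

int main(void)
{
	printf("%d\n", map_size_ok(0x1234, ULONG_MAX - 100));	/* 0 */
	printf("%d\n", map_size_ok(0x1234, 1UL << 20));		/* 1 */
	return 0;
}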
diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c
index 0ddc28961524..2e1c4d2905e8 100644
--- a/drivers/misc/genwqe/card_utils.c
+++ b/drivers/misc/genwqe/card_utils.c
@@ -578,6 +578,10 @@ int genwqe_user_vmap(struct genwqe_dev *cd, struct dma_mapping *m, void *uaddr,
/* determine space needed for page_list. */
data = (unsigned long)uaddr;
offs = offset_in_page(data);
+ if (size > ULONG_MAX - PAGE_SIZE - offs) {
+ m->size = 0; /* mark unused and not added */
+ return -EINVAL;
+ }
m->nr_pages = DIV_ROUND_UP(offs + size, PAGE_SIZE);
m->page_list = kcalloc(m->nr_pages,
diff --git a/drivers/misc/habanalabs/context.c b/drivers/misc/habanalabs/context.c
index 4804cdcf4c48..f4c92f110a72 100644
--- a/drivers/misc/habanalabs/context.c
+++ b/drivers/misc/habanalabs/context.c
@@ -26,6 +26,12 @@ static void hl_ctx_fini(struct hl_ctx *ctx)
dma_fence_put(ctx->cs_pending[i]);
if (ctx->asid != HL_KERNEL_ASID_ID) {
+ /*
+ * The engines are stopped as there is no executing CS, but
+ * Coresight might still be running and accessing addresses
+ * related to the stopped engines. Hence, stop it explicitly.
+ */
+ hdev->asic_funcs->halt_coresight(hdev);
hl_vm_ctx_fini(ctx);
hl_asid_free(hdev, ctx->asid);
}
diff --git a/drivers/misc/habanalabs/debugfs.c b/drivers/misc/habanalabs/debugfs.c
index a4447699ff4e..ba418aaa404c 100644
--- a/drivers/misc/habanalabs/debugfs.c
+++ b/drivers/misc/habanalabs/debugfs.c
@@ -459,41 +459,31 @@ static ssize_t mmu_write(struct file *file, const char __user *buf,
struct hl_debugfs_entry *entry = s->private;
struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
struct hl_device *hdev = dev_entry->hdev;
- char kbuf[MMU_KBUF_SIZE], asid_kbuf[MMU_ASID_BUF_SIZE],
- addr_kbuf[MMU_ADDR_BUF_SIZE];
+ char kbuf[MMU_KBUF_SIZE];
char *c;
ssize_t rc;
if (!hdev->mmu_enable)
return count;
- memset(kbuf, 0, sizeof(kbuf));
- memset(asid_kbuf, 0, sizeof(asid_kbuf));
- memset(addr_kbuf, 0, sizeof(addr_kbuf));
-
+ if (count > sizeof(kbuf) - 1)
+ goto err;
if (copy_from_user(kbuf, buf, count))
goto err;
-
- kbuf[MMU_KBUF_SIZE - 1] = 0;
+ kbuf[count] = 0;
c = strchr(kbuf, ' ');
if (!c)
goto err;
+ *c = '\0';
- memcpy(asid_kbuf, kbuf, c - kbuf);
-
- rc = kstrtouint(asid_kbuf, 10, &dev_entry->mmu_asid);
+ rc = kstrtouint(kbuf, 10, &dev_entry->mmu_asid);
if (rc)
goto err;
- c = strstr(kbuf, " 0x");
- if (!c)
+ if (strncmp(c+1, "0x", 2))
goto err;
-
- c += 3;
- memcpy(addr_kbuf, c, (kbuf + count) - c);
-
- rc = kstrtoull(addr_kbuf, 16, &dev_entry->mmu_addr);
+ rc = kstrtoull(c+3, 16, &dev_entry->mmu_addr);
if (rc)
goto err;
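The rewritten parser works on the single user buffer in place: NUL-terminate at the first space, parse the ASID from the first token, insist on a literal "0x" prefix, then parse the address from the rest, with no intermediate copies. A rough userspace equivalent, using strtoul()/strtoull() as stand-ins for the stricter kstrto*() helpers:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Parse "<asid> 0x<addr>" in place, e.g. "7 0xdeadbeef". */
static int parse_mmu_cmd(char *kbuf, unsigned int *asid,
                         unsigned long long *addr)
{
    char *c = strchr(kbuf, ' ');

    if (!c)
        return -1;
    *c = '\0';                         /* split ASID from address */

    *asid = (unsigned int)strtoul(kbuf, NULL, 10);
    if (strncmp(c + 1, "0x", 2))       /* address must be hex */
        return -1;
    *addr = strtoull(c + 3, NULL, 16); /* skip the "0x" */
    return 0;
}

int main(void)
{
    char buf[] = "7 0xdeadbeef";
    unsigned int asid;
    unsigned long long addr;

    if (!parse_mmu_cmd(buf, &asid, &addr))
        printf("asid=%u addr=0x%llx\n", asid, addr);
    return 0;
}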
@@ -510,6 +500,7 @@ static int device_va_to_pa(struct hl_device *hdev, u64 virt_addr,
{
struct hl_ctx *ctx = hdev->user_ctx;
u64 hop_addr, hop_pte_addr, hop_pte;
+ u64 offset_mask = HOP4_MASK | OFFSET_MASK;
int rc = 0;
if (!ctx) {
@@ -552,12 +543,14 @@ static int device_va_to_pa(struct hl_device *hdev, u64 virt_addr,
goto not_mapped;
hop_pte_addr = get_hop4_pte_addr(ctx, hop_addr, virt_addr);
hop_pte = hdev->asic_funcs->read_pte(hdev, hop_pte_addr);
+
+ offset_mask = OFFSET_MASK;
}
if (!(hop_pte & PAGE_PRESENT_MASK))
goto not_mapped;
- *phys_addr = (hop_pte & PTE_PHYS_ADDR_MASK) | (virt_addr & OFFSET_MASK);
+ *phys_addr = (hop_pte & ~offset_mask) | (virt_addr & offset_mask);
goto out;
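The single offset_mask assignment handles both mapping sizes: if the walk ends at hop 3 (a huge page), the hop-4 index bits are part of the page offset, so the mask keeps HOP4_MASK as well; if hop 4 is walked, only the small-page offset remains. A sketch with made-up mask values (the driver's real HOP4_MASK/OFFSET_MASK constants differ):

#include <stdint.h>
#include <stdio.h>

/* Illustrative values only: 4 KiB pages inside 2 MiB huge pages. */
#define OFFSET_MASK 0xFFFULL           /* bits [11:0] */
#define HOP4_MASK   0x1FF000ULL        /* bits [20:12] */

static uint64_t va_to_pa(uint64_t pte, uint64_t va, int huge)
{
    uint64_t mask = huge ? (HOP4_MASK | OFFSET_MASK) : OFFSET_MASK;

    /* The PTE supplies the frame; the VA supplies the in-page offset. */
    return (pte & ~mask) | (va & mask);
}

int main(void)
{
    /* Huge mapping: 0x80000000 | (0x12345678 & 0x1FFFFF) = 0x80145678 */
    printf("0x%llx\n",
           (unsigned long long)va_to_pa(0x80000000ULL, 0x12345678ULL, 1));
    return 0;
}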
@@ -600,10 +593,8 @@ static ssize_t hl_data_read32(struct file *f, char __user *buf,
}
sprintf(tmp_buf, "0x%08x\n", val);
- rc = simple_read_from_buffer(buf, strlen(tmp_buf) + 1, ppos, tmp_buf,
- strlen(tmp_buf) + 1);
-
- return rc;
+ return simple_read_from_buffer(buf, count, ppos, tmp_buf,
+ strlen(tmp_buf));
}
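These conversions all follow the same rule: simple_read_from_buffer() takes the reader's count as the copy limit and the payload length (without the trailing NUL) as the amount available, and it advances *ppos itself, so passing strlen() + 1 for both, as the old code did, leaked a NUL byte into the output and ignored short user buffers. A userspace model of the helper's documented semantics:

#include <stdio.h>
#include <string.h>

static long read_from_buffer(char *to, size_t count, long *ppos,
                             const char *from, size_t available)
{
    size_t pos = (size_t)*ppos;
    size_t n;

    if (pos >= available)
        return 0;                      /* EOF */
    n = available - pos;
    if (n > count)
        n = count;                     /* honour the reader's buffer size */
    memcpy(to, from + pos, n);
    *ppos += (long)n;
    return (long)n;
}

int main(void)
{
    char out[4];
    long pos = 0, n;
    const char *msg = "0x00001234\n";

    /* Short reads accumulate the full payload across calls. */
    while ((n = read_from_buffer(out, sizeof(out), &pos, msg, strlen(msg))) > 0)
        fwrite(out, 1, (size_t)n, stdout);
    return 0;
}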
static ssize_t hl_data_write32(struct file *f, const char __user *buf,
@@ -645,7 +636,6 @@ static ssize_t hl_get_power_state(struct file *f, char __user *buf,
struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
struct hl_device *hdev = entry->hdev;
char tmp_buf[200];
- ssize_t rc;
int i;
if (*ppos)
@@ -660,10 +650,8 @@ static ssize_t hl_get_power_state(struct file *f, char __user *buf,
sprintf(tmp_buf,
"current power state: %d\n1 - D0\n2 - D3hot\n3 - Unknown\n", i);
- rc = simple_read_from_buffer(buf, strlen(tmp_buf) + 1, ppos, tmp_buf,
- strlen(tmp_buf) + 1);
-
- return rc;
+ return simple_read_from_buffer(buf, count, ppos, tmp_buf,
+ strlen(tmp_buf));
}
static ssize_t hl_set_power_state(struct file *f, const char __user *buf,
@@ -716,8 +704,8 @@ static ssize_t hl_i2c_data_read(struct file *f, char __user *buf,
}
sprintf(tmp_buf, "0x%02x\n", val);
- rc = simple_read_from_buffer(buf, strlen(tmp_buf) + 1, ppos, tmp_buf,
- strlen(tmp_buf) + 1);
+ rc = simple_read_from_buffer(buf, count, ppos, tmp_buf,
+ strlen(tmp_buf));
return rc;
}
@@ -806,18 +794,9 @@ static ssize_t hl_led2_write(struct file *f, const char __user *buf,
static ssize_t hl_device_read(struct file *f, char __user *buf,
size_t count, loff_t *ppos)
{
- char tmp_buf[200];
- ssize_t rc;
-
- if (*ppos)
- return 0;
-
- sprintf(tmp_buf,
- "Valid values: disable, enable, suspend, resume, cpu_timeout\n");
- rc = simple_read_from_buffer(buf, strlen(tmp_buf) + 1, ppos, tmp_buf,
- strlen(tmp_buf) + 1);
-
- return rc;
+ static const char *help =
+ "Valid values: disable, enable, suspend, resume, cpu_timeout\n";
+ return simple_read_from_buffer(buf, count, ppos, help, strlen(help));
}
static ssize_t hl_device_write(struct file *f, const char __user *buf,
@@ -825,7 +804,7 @@ static ssize_t hl_device_write(struct file *f, const char __user *buf,
{
struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
struct hl_device *hdev = entry->hdev;
- char data[30];
+ char data[30] = {0};
/* don't allow partial writes */
if (*ppos != 0)
diff --git a/drivers/misc/habanalabs/device.c b/drivers/misc/habanalabs/device.c
index 91a9e47a3482..0b19d3eefb98 100644
--- a/drivers/misc/habanalabs/device.c
+++ b/drivers/misc/habanalabs/device.c
@@ -231,6 +231,7 @@ static int device_early_init(struct hl_device *hdev)
mutex_init(&hdev->fd_open_cnt_lock);
mutex_init(&hdev->send_cpu_message_lock);
+ mutex_init(&hdev->mmu_cache_lock);
INIT_LIST_HEAD(&hdev->hw_queues_mirror_list);
spin_lock_init(&hdev->hw_queues_mirror_lock);
atomic_set(&hdev->in_reset, 0);
@@ -260,6 +261,7 @@ early_fini:
*/
static void device_early_fini(struct hl_device *hdev)
{
+ mutex_destroy(&hdev->mmu_cache_lock);
mutex_destroy(&hdev->send_cpu_message_lock);
hl_cb_mgr_fini(hdev, &hdev->kernel_cb_mgr);
diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c
index a582e29c1ee4..02d116b01a1a 100644
--- a/drivers/misc/habanalabs/goya/goya.c
+++ b/drivers/misc/habanalabs/goya/goya.c
@@ -4819,7 +4819,8 @@ static const struct hl_asic_funcs goya_funcs = {
.set_dram_bar_base = goya_set_ddr_bar_base,
.init_iatu = goya_init_iatu,
.rreg = hl_rreg,
- .wreg = hl_wreg
+ .wreg = hl_wreg,
+ .halt_coresight = goya_halt_coresight
};
/*
diff --git a/drivers/misc/habanalabs/goya/goyaP.h b/drivers/misc/habanalabs/goya/goyaP.h
index 14e216cb3668..c83cab0d641e 100644
--- a/drivers/misc/habanalabs/goya/goyaP.h
+++ b/drivers/misc/habanalabs/goya/goyaP.h
@@ -202,6 +202,7 @@ void goya_add_device_attr(struct hl_device *hdev,
struct attribute_group *dev_attr_grp);
int goya_armcp_info_get(struct hl_device *hdev);
int goya_debug_coresight(struct hl_device *hdev, void *data);
+void goya_halt_coresight(struct hl_device *hdev);
void goya_mmu_prepare(struct hl_device *hdev, u32 asid);
int goya_mmu_clear_pgt_range(struct hl_device *hdev);
diff --git a/drivers/misc/habanalabs/goya/goya_coresight.c b/drivers/misc/habanalabs/goya/goya_coresight.c
index 1ac951f52d1e..d7ec7ad84cc6 100644
--- a/drivers/misc/habanalabs/goya/goya_coresight.c
+++ b/drivers/misc/habanalabs/goya/goya_coresight.c
@@ -425,8 +425,18 @@ static int goya_config_etr(struct hl_device *hdev,
WREG32(base_reg + 0x28, 0);
WREG32(base_reg + 0x304, 0);
- if (params->output_size >= sizeof(u32))
- *(u32 *) params->output = RREG32(base_reg + 0x18);
+ if (params->output_size >= sizeof(u64)) {
+ u32 rwp, rwphi;
+
+ /*
+ * The trace buffer address is 40 bits wide. The end of
+ * the buffer is set in the RWP register (lower 32
+ * bits), and in the RWPHI register (upper 8 bits).
+ */
+ rwp = RREG32(base_reg + 0x18);
+ rwphi = RREG32(base_reg + 0x3c) & 0xff;
+ *(u64 *) params->output = ((u64) rwphi << 32) | rwp;
+ }
}
return 0;
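Rebuilding the buffer pointer is plain bit arithmetic: RWP supplies the low 32 bits and RWPHI the upper 8, giving the full 40-bit address. A standalone sketch:

#include <stdint.h>
#include <stdio.h>

/* Combine the two halves of a 40-bit trace-buffer write pointer. */
static uint64_t etr_write_pointer(uint32_t rwp, uint32_t rwphi)
{
    return ((uint64_t)(rwphi & 0xff) << 32) | rwp;
}

int main(void)
{
    /* RWPHI=0x3c, RWP=0xdeadbeef -> 0x3cdeadbeef */
    printf("0x%llx\n",
           (unsigned long long)etr_write_pointer(0xdeadbeef, 0x3c));
    return 0;
}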
@@ -626,3 +636,20 @@ int goya_debug_coresight(struct hl_device *hdev, void *data)
return rc;
}
+
+void goya_halt_coresight(struct hl_device *hdev)
+{
+ struct hl_debug_params params = {};
+ int i, rc;
+
+ for (i = GOYA_ETF_FIRST ; i <= GOYA_ETF_LAST ; i++) {
+ params.reg_idx = i;
+ rc = goya_config_etf(hdev, &params);
+ if (rc)
+ dev_err(hdev->dev, "halt ETF failed, %d/%d\n", rc, i);
+ }
+
+ rc = goya_config_etr(hdev, &params);
+ if (rc)
+ dev_err(hdev->dev, "halt ETR failed, %d\n", rc);
+}
diff --git a/drivers/misc/habanalabs/habanalabs.h b/drivers/misc/habanalabs/habanalabs.h
index 71243b319920..adef7d9d7488 100644
--- a/drivers/misc/habanalabs/habanalabs.h
+++ b/drivers/misc/habanalabs/habanalabs.h
@@ -501,6 +501,7 @@ enum hl_pll_frequency {
* @init_iatu: Initialize the iATU unit inside the PCI controller.
* @rreg: Read a register. Needed for simulator support.
* @wreg: Write a register. Needed for simulator support.
+ * @halt_coresight: stop the ETF and ETR traces.
*/
struct hl_asic_funcs {
int (*early_init)(struct hl_device *hdev);
@@ -578,6 +579,7 @@ struct hl_asic_funcs {
int (*init_iatu)(struct hl_device *hdev);
u32 (*rreg)(struct hl_device *hdev, u32 reg);
void (*wreg)(struct hl_device *hdev, u32 reg, u32 val);
+ void (*halt_coresight)(struct hl_device *hdev);
};
diff --git a/drivers/misc/habanalabs/memory.c b/drivers/misc/habanalabs/memory.c
index d67d24c13efd..693877e37fd8 100644
--- a/drivers/misc/habanalabs/memory.c
+++ b/drivers/misc/habanalabs/memory.c
@@ -675,11 +675,6 @@ static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
total_npages += npages;
- if (first) {
- first = false;
- dma_addr &= PAGE_MASK_2MB;
- }
-
if ((npages % PGS_IN_2MB_PAGE) ||
(dma_addr & (PAGE_SIZE_2MB - 1)))
is_huge_page_opt = false;
@@ -704,7 +699,6 @@ static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
phys_pg_pack->total_size = total_npages * page_size;
j = 0;
- first = true;
for_each_sg(userptr->sgt->sgl, sg, userptr->sgt->nents, i) {
npages = get_sg_info(sg, &dma_addr);
diff --git a/drivers/misc/habanalabs/mmu.c b/drivers/misc/habanalabs/mmu.c
index 533d9315b6fb..10aee3141444 100644
--- a/drivers/misc/habanalabs/mmu.c
+++ b/drivers/misc/habanalabs/mmu.c
@@ -404,15 +404,12 @@ int hl_mmu_init(struct hl_device *hdev)
/* MMU H/W init was already done in device hw_init() */
- mutex_init(&hdev->mmu_cache_lock);
-
hdev->mmu_pgt_pool =
gen_pool_create(__ffs(prop->mmu_hop_table_size), -1);
if (!hdev->mmu_pgt_pool) {
dev_err(hdev->dev, "Failed to create page gen pool\n");
- rc = -ENOMEM;
- goto err_pool_create;
+ return -ENOMEM;
}
rc = gen_pool_add(hdev->mmu_pgt_pool, prop->mmu_pgt_addr +
@@ -436,8 +433,6 @@ int hl_mmu_init(struct hl_device *hdev)
err_pool_add:
gen_pool_destroy(hdev->mmu_pgt_pool);
-err_pool_create:
- mutex_destroy(&hdev->mmu_cache_lock);
return rc;
}
@@ -459,7 +454,6 @@ void hl_mmu_fini(struct hl_device *hdev)
kvfree(hdev->mmu_shadow_hop0);
gen_pool_destroy(hdev->mmu_pgt_pool);
- mutex_destroy(&hdev->mmu_cache_lock);
/* MMU H/W fini will be done in device hw_fini() */
}
diff --git a/drivers/misc/lkdtm/bugs.c b/drivers/misc/lkdtm/bugs.c
index 7eebbdfbcacd..17f839dee976 100644
--- a/drivers/misc/lkdtm/bugs.c
+++ b/drivers/misc/lkdtm/bugs.c
@@ -32,12 +32,20 @@ static int recur_count = REC_NUM_DEFAULT;
static DEFINE_SPINLOCK(lock_me_up);
-static int recursive_loop(int remaining)
+/*
+ * Make sure compiler does not optimize this function or stack frame away:
+ * - function marked noinline
+ * - stack variables are marked volatile
+ * - stack variables are written (memset()) and read (pr_info())
+ * - function has external effects (pr_info())
+ */
+static int noinline recursive_loop(int remaining)
{
- char buf[REC_STACK_SIZE];
+ volatile char buf[REC_STACK_SIZE];
- /* Make sure compiler does not optimize this away. */
- memset(buf, (remaining & 0xff) | 0x1, REC_STACK_SIZE);
+ memset((void *)buf, remaining & 0xFF, sizeof(buf));
+ pr_info("loop %d/%d ...\n", (int)buf[remaining % sizeof(buf)],
+ recur_count);
if (!remaining)
return 0;
else
@@ -81,9 +89,12 @@ void lkdtm_LOOP(void)
;
}
-void lkdtm_OVERFLOW(void)
+void lkdtm_EXHAUST_STACK(void)
{
- (void) recursive_loop(recur_count);
+ pr_info("Calling function with %d frame size to depth %d ...\n",
+ REC_STACK_SIZE, recur_count);
+ recursive_loop(recur_count);
+ pr_info("FAIL: survived without exhausting stack?!\n");
}
static noinline void __lkdtm_CORRUPT_STACK(void *stack)
diff --git a/drivers/misc/lkdtm/core.c b/drivers/misc/lkdtm/core.c
index 1972dad966f5..8a1428d4f138 100644
--- a/drivers/misc/lkdtm/core.c
+++ b/drivers/misc/lkdtm/core.c
@@ -106,12 +106,12 @@ static const struct crashtype crashtypes[] = {
CRASHTYPE(WARNING),
CRASHTYPE(EXCEPTION),
CRASHTYPE(LOOP),
- CRASHTYPE(OVERFLOW),
+ CRASHTYPE(EXHAUST_STACK),
+ CRASHTYPE(CORRUPT_STACK),
+ CRASHTYPE(CORRUPT_STACK_STRONG),
CRASHTYPE(CORRUPT_LIST_ADD),
CRASHTYPE(CORRUPT_LIST_DEL),
CRASHTYPE(CORRUPT_USER_DS),
- CRASHTYPE(CORRUPT_STACK),
- CRASHTYPE(CORRUPT_STACK_STRONG),
CRASHTYPE(STACK_GUARD_PAGE_LEADING),
CRASHTYPE(STACK_GUARD_PAGE_TRAILING),
CRASHTYPE(UNALIGNED_LOAD_STORE_WRITE),
diff --git a/drivers/misc/lkdtm/lkdtm.h b/drivers/misc/lkdtm/lkdtm.h
index b69ee004a3f7..23dc565b4307 100644
--- a/drivers/misc/lkdtm/lkdtm.h
+++ b/drivers/misc/lkdtm/lkdtm.h
@@ -13,7 +13,7 @@ void lkdtm_BUG(void);
void lkdtm_WARNING(void);
void lkdtm_EXCEPTION(void);
void lkdtm_LOOP(void);
-void lkdtm_OVERFLOW(void);
+void lkdtm_EXHAUST_STACK(void);
void lkdtm_CORRUPT_STACK(void);
void lkdtm_CORRUPT_STACK_STRONG(void);
void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void);
diff --git a/drivers/misc/lkdtm/usercopy.c b/drivers/misc/lkdtm/usercopy.c
index d5a0e7f1813b..e172719dd86d 100644
--- a/drivers/misc/lkdtm/usercopy.c
+++ b/drivers/misc/lkdtm/usercopy.c
@@ -324,14 +324,16 @@ free_user:
void lkdtm_USERCOPY_KERNEL_DS(void)
{
- char __user *user_ptr = (char __user *)ERR_PTR(-EINVAL);
+ char __user *user_ptr =
+ (char __user *)(0xFUL << (sizeof(unsigned long) * 8 - 4));
mm_segment_t old_fs = get_fs();
char buf[10] = {0};
- pr_info("attempting copy_to_user on unmapped kernel address\n");
+ pr_info("attempting copy_to_user() to noncanonical address: %px\n",
+ user_ptr);
set_fs(KERNEL_DS);
- if (copy_to_user(user_ptr, buf, sizeof(buf)))
- pr_info("copy_to_user un unmapped kernel address failed\n");
+ if (copy_to_user(user_ptr, buf, sizeof(buf)) == 0)
+ pr_err("copy_to_user() to noncanonical address succeeded!?\n");
set_fs(old_fs);
}
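The replacement pointer sets the top nibble of the word to 0xF, which on x86-64 is noncanonical (bits 63:48 must sign-extend bit 47), so any access faults regardless of the page tables. A quick check of the expression:

#include <stdio.h>

int main(void)
{
    /* Same expression as the patch: 0xF in the top nibble of a word. */
    unsigned long addr = 0xFUL << (sizeof(unsigned long) * 8 - 4);

    /* Prints 0xf000000000000000 on LP64, 0xf0000000 on ILP32. */
    printf("0x%lx\n", addr);
    return 0;
}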
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index b5b9c6142f08..92900a095796 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -377,6 +377,8 @@ static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
blk_queue_max_segment_size(mq->queue,
round_down(host->max_seg_size, block_size));
+ dma_set_max_seg_size(mmc_dev(host), queue_max_segment_size(mq->queue));
+
INIT_WORK(&mq->recovery_work, mmc_mq_recovery_handler);
INIT_WORK(&mq->complete_work, mmc_blk_mq_complete_work);
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
index c5a8af4ca76b..5582561586b4 100644
--- a/drivers/mmc/host/meson-gx-mmc.c
+++ b/drivers/mmc/host/meson-gx-mmc.c
@@ -859,6 +859,9 @@ static irqreturn_t meson_mmc_irq(int irq, void *dev_id)
if (WARN_ON(!host) || WARN_ON(!host->cmd))
return IRQ_NONE;
+ /* ack all raised interrupts */
+ writel(status, host->regs + SD_EMMC_STATUS);
+
cmd = host->cmd;
data = cmd->data;
cmd->error = 0;
@@ -905,9 +908,6 @@ out:
if (ret == IRQ_HANDLED)
meson_mmc_request_done(host->mmc, cmd->mrq);
- /* ack all raised interrupts */
- writel(status, host->regs + SD_EMMC_STATUS);
-
return ret;
}
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index 1e85fb7f1025..781a3e106d9a 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -856,7 +856,7 @@ static void tegra_sdhci_tap_correction(struct sdhci_host *host, u8 thd_up,
}
if (!first_fail) {
- WARN_ON("no edge detected, continue with hw tuned delay.\n");
+ WARN(1, "no edge detected, continue with hw tuned delay.\n");
} else if (first_pass) {
/* set tap location at fixed tap relative to the first edge */
edge1 = first_fail_tap + (first_pass_tap - first_fail_tap) / 2;
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index d128708924e4..59acf8e3331e 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -2133,6 +2133,17 @@ void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
}
EXPORT_SYMBOL_GPL(sdhci_enable_sdio_irq);
+static void sdhci_ack_sdio_irq(struct mmc_host *mmc)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+ if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
+ sdhci_enable_sdio_irq_nolock(host, true);
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+
int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
struct mmc_ios *ios)
{
@@ -2581,6 +2592,7 @@ static const struct mmc_host_ops sdhci_ops = {
.get_ro = sdhci_get_ro,
.hw_reset = sdhci_hw_reset,
.enable_sdio_irq = sdhci_enable_sdio_irq,
+ .ack_sdio_irq = sdhci_ack_sdio_irq,
.start_signal_voltage_switch = sdhci_start_signal_voltage_switch,
.prepare_hs400_tuning = sdhci_prepare_hs400_tuning,
.execute_tuning = sdhci_execute_tuning,
@@ -3083,8 +3095,7 @@ static irqreturn_t sdhci_irq(int irq, void *dev_id)
if ((intmask & SDHCI_INT_CARD_INT) &&
(host->ier & SDHCI_INT_CARD_INT)) {
sdhci_enable_sdio_irq_nolock(host, false);
- host->thread_isr |= SDHCI_INT_CARD_INT;
- result = IRQ_WAKE_THREAD;
+ sdio_signal_irq(host->mmc);
}
intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
@@ -3156,15 +3167,6 @@ static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
mmc_detect_change(mmc, msecs_to_jiffies(200));
}
- if (isr & SDHCI_INT_CARD_INT) {
- sdio_run_irqs(host->mmc);
-
- spin_lock_irqsave(&host->lock, flags);
- if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
- sdhci_enable_sdio_irq_nolock(host, true);
- spin_unlock_irqrestore(&host->lock, flags);
- }
-
return IRQ_HANDLED;
}
diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c
index a91c0b45c48d..3222ea4d584d 100644
--- a/drivers/mmc/host/sdhci_am654.c
+++ b/drivers/mmc/host/sdhci_am654.c
@@ -231,7 +231,7 @@ static int sdhci_am654_init(struct sdhci_host *host)
ctl_cfg_2 = SLOTTYPE_EMBEDDED;
regmap_update_bits(sdhci_am654->base, CTL_CFG_2,
- ctl_cfg_2, SLOTTYPE_MASK);
+ SLOTTYPE_MASK, ctl_cfg_2);
return sdhci_add_host(host);
}
diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c
index 130b91cb0f8a..84cb7d2aacdf 100644
--- a/drivers/mmc/host/tmio_mmc_core.c
+++ b/drivers/mmc/host/tmio_mmc_core.c
@@ -842,8 +842,9 @@ static void tmio_mmc_finish_request(struct tmio_mmc_host *host)
if (mrq->cmd->error || (mrq->data && mrq->data->error))
tmio_mmc_abort_dma(host);
+ /* SCC error means retune, but executed command was still successful */
if (host->check_scc_error && host->check_scc_error(host))
- mrq->cmd->error = -EILSEQ;
+ mmc_retune_needed(host->mmc);
/* If SET_BLOCK_COUNT, continue with main command */
if (host->mrq && !mrq->cmd->error) {
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 42da3f1bff5b..063c7a671b41 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -1388,7 +1388,7 @@ static int mv88e6xxx_vtu_get(struct mv88e6xxx_chip *chip, u16 vid,
int err;
if (!vid)
- return -EINVAL;
+ return -EOPNOTSUPP;
entry->vid = vid - 1;
entry->valid = false;
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
index 0663b78a2f6c..1c3959efebc4 100644
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -652,16 +652,6 @@ static int sja1105_speed[] = {
[SJA1105_SPEED_1000MBPS] = 1000,
};
-static sja1105_speed_t sja1105_get_speed_cfg(unsigned int speed_mbps)
-{
- int i;
-
- for (i = SJA1105_SPEED_AUTO; i <= SJA1105_SPEED_1000MBPS; i++)
- if (sja1105_speed[i] == speed_mbps)
- return i;
- return -EINVAL;
-}
-
/* Set link speed and enable/disable traffic I/O in the MAC configuration
* for a specific port.
*
@@ -684,8 +674,21 @@ static int sja1105_adjust_port_config(struct sja1105_private *priv, int port,
mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;
mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
- speed = sja1105_get_speed_cfg(speed_mbps);
- if (speed_mbps && speed < 0) {
+ switch (speed_mbps) {
+ case 0:
+ /* No speed update requested */
+ speed = SJA1105_SPEED_AUTO;
+ break;
+ case 10:
+ speed = SJA1105_SPEED_10MBPS;
+ break;
+ case 100:
+ speed = SJA1105_SPEED_100MBPS;
+ break;
+ case 1000:
+ speed = SJA1105_SPEED_1000MBPS;
+ break;
+ default:
dev_err(dev, "Invalid speed %iMbps\n", speed_mbps);
return -EINVAL;
}
@@ -695,10 +698,7 @@ static int sja1105_adjust_port_config(struct sja1105_private *priv, int port,
* and we no longer need to store it in the static config (already told
* hardware we want auto during upload phase).
*/
- if (speed_mbps)
- mac[port].speed = speed;
- else
- mac[port].speed = SJA1105_SPEED_AUTO;
+ mac[port].speed = speed;
/* On P/Q/R/S, one can read from the device via the MAC reconfiguration
* tables. On E/T, MAC reconfig tables are not readable, only writable.
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
index c38020dcbd3a..52646855495e 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
@@ -332,13 +332,13 @@ static int hw_atl_utils_fw_upload_dwords(struct aq_hw_s *self, u32 a, u32 *p,
{
u32 val;
int err = 0;
- bool is_locked;
- is_locked = hw_atl_sem_ram_get(self);
- if (!is_locked) {
- err = -ETIME;
+ err = readx_poll_timeout_atomic(hw_atl_sem_ram_get, self,
+ val, val == 1U,
+ 10U, 100000U);
+ if (err < 0)
goto err_exit;
- }
+
if (IS_CHIP_FEATURE(REVISION_B1)) {
u32 offset = 0;
@@ -350,8 +350,8 @@ static int hw_atl_utils_fw_upload_dwords(struct aq_hw_s *self, u32 a, u32 *p,
/* 1000 times by 10us = 10ms */
err = readx_poll_timeout_atomic(hw_atl_scrpad12_get,
self, val,
- (val & 0xF0000000) ==
- 0x80000000,
+ (val & 0xF0000000) !=
+ 0x80000000,
10U, 10000U);
}
} else {
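readx_poll_timeout_atomic(op, args, val, cond, delay_us, timeout_us) re-reads val = op(args) until cond holds or timeout_us elapses, returning 0 on success and a negative error on timeout; the change above turns a single try-once semaphore grab into up to 100 ms of polling. A userspace sketch of the same loop shape, with a fake getter standing in for hw_atl_sem_ram_get():

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

/* Poll getter() every delay_us until it returns want or timeout_us passes. */
static int poll_until(unsigned int (*getter)(void), unsigned int want,
                      unsigned int delay_us, unsigned int timeout_us)
{
    unsigned int waited = 0;

    for (;;) {
        if (getter() == want)
            return 0;
        if (waited >= timeout_us)
            return -ETIMEDOUT;
        usleep(delay_us);
        waited += delay_us;
    }
}

static unsigned int fake_sem_get(void) { return 1; /* semaphore acquired */ }

int main(void)
{
    printf("%d\n", poll_until(fake_sem_get, 1U, 10U, 100000U)); /* 0 */
    return 0;
}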
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
index f1cea9bea27f..da726489e3c8 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
@@ -381,7 +381,7 @@ static int aq_fw2x_set_sleep_proxy(struct aq_hw_s *self, u8 *mac)
err = readx_poll_timeout_atomic(aq_fw2x_state2_get,
self, val,
val & HW_ATL_FW2X_CTRL_SLEEP_PROXY,
- 1U, 10000U);
+ 1U, 100000U);
err_exit:
return err;
@@ -401,6 +401,8 @@ static int aq_fw2x_set_wol_params(struct aq_hw_s *self, u8 *mac)
msg = (struct fw2x_msg_wol *)rpc;
+ memset(msg, 0, sizeof(*msg));
+
msg->msg_id = HAL_ATLANTIC_UTILS_FW2X_MSG_WOL;
msg->magic_packet_enabled = true;
memcpy(msg->hw_addr, mac, ETH_ALEN);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index d1df0a44f93c..717fccc2efba 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -335,6 +335,7 @@ static int __lb_setup(struct net_device *ndev,
static int __lb_up(struct net_device *ndev,
enum hnae_loop loop_mode)
{
+#define NIC_LB_TEST_WAIT_PHY_LINK_TIME 300
struct hns_nic_priv *priv = netdev_priv(ndev);
struct hnae_handle *h = priv->ae_handle;
int speed, duplex;
@@ -361,6 +362,9 @@ static int __lb_up(struct net_device *ndev,
h->dev->ops->adjust_link(h, speed, duplex);
+ /* wait for the link adjustment to finish and the PHY to be ready */
+ msleep(NIC_LB_TEST_WAIT_PHY_LINK_TIME);
+
return 0;
}
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 7a67e23a2c2b..d8e5241097a9 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -1304,8 +1304,8 @@ static void mvpp2_ethtool_get_strings(struct net_device *netdev, u32 sset,
int i;
for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++)
- memcpy(data + i * ETH_GSTRING_LEN,
- &mvpp2_ethtool_regs[i].string, ETH_GSTRING_LEN);
+ strscpy(data + i * ETH_GSTRING_LEN,
+ mvpp2_ethtool_regs[i].string, ETH_GSTRING_LEN);
}
}
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 96b53ff68c96..6cfffb64cd51 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -1772,6 +1772,7 @@ static void mtk_poll_controller(struct net_device *dev)
static int mtk_start_dma(struct mtk_eth *eth)
{
+ u32 rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
int err;
err = mtk_dma_init(eth);
@@ -1788,7 +1789,7 @@ static int mtk_start_dma(struct mtk_eth *eth)
MTK_QDMA_GLO_CFG);
mtk_w32(eth,
- MTK_RX_DMA_EN | MTK_RX_2B_OFFSET |
+ MTK_RX_DMA_EN | rx_2b_offset |
MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
MTK_PDMA_GLO_CFG);
@@ -2292,13 +2293,13 @@ static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
switch (cmd->cmd) {
case ETHTOOL_GRXRINGS:
- if (dev->features & NETIF_F_LRO) {
+ if (dev->hw_features & NETIF_F_LRO) {
cmd->data = MTK_MAX_RX_RING_NUM;
ret = 0;
}
break;
case ETHTOOL_GRXCLSRLCNT:
- if (dev->features & NETIF_F_LRO) {
+ if (dev->hw_features & NETIF_F_LRO) {
struct mtk_mac *mac = netdev_priv(dev);
cmd->rule_cnt = mac->hwlro_ip_cnt;
@@ -2306,11 +2307,11 @@ static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
}
break;
case ETHTOOL_GRXCLSRULE:
- if (dev->features & NETIF_F_LRO)
+ if (dev->hw_features & NETIF_F_LRO)
ret = mtk_hwlro_get_fdir_entry(dev, cmd);
break;
case ETHTOOL_GRXCLSRLALL:
- if (dev->features & NETIF_F_LRO)
+ if (dev->hw_features & NETIF_F_LRO)
ret = mtk_hwlro_get_fdir_all(dev, cmd,
rule_locs);
break;
@@ -2327,11 +2328,11 @@ static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
switch (cmd->cmd) {
case ETHTOOL_SRXCLSRLINS:
- if (dev->features & NETIF_F_LRO)
+ if (dev->hw_features & NETIF_F_LRO)
ret = mtk_hwlro_add_ipaddr(dev, cmd);
break;
case ETHTOOL_SRXCLSRLDEL:
- if (dev->features & NETIF_F_LRO)
+ if (dev->hw_features & NETIF_F_LRO)
ret = mtk_hwlro_del_ipaddr(dev, cmd);
break;
default:
diff --git a/drivers/net/ethernet/ti/cpsw_ethtool.c b/drivers/net/ethernet/ti/cpsw_ethtool.c
index a4a7ec0d2531..6d1c9ebae7cc 100644
--- a/drivers/net/ethernet/ti/cpsw_ethtool.c
+++ b/drivers/net/ethernet/ti/cpsw_ethtool.c
@@ -643,7 +643,7 @@ void cpsw_get_ringparam(struct net_device *ndev,
struct cpsw_common *cpsw = priv->cpsw;
/* not supported */
- ering->tx_max_pending = 0;
+ ering->tx_max_pending = cpsw->descs_pool_size - CPSW_MAX_QUEUES;
ering->tx_pending = cpdma_get_num_tx_descs(cpsw->dma);
ering->rx_max_pending = cpsw->descs_pool_size - CPSW_MAX_QUEUES;
ering->rx_pending = cpdma_get_num_rx_descs(cpsw->dma);
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index cf38c392b9b6..1c96bed5a7c4 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -107,7 +107,7 @@ static void ipvlan_port_destroy(struct net_device *dev)
}
#define IPVLAN_FEATURES \
- (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
+ (NETIF_F_SG | NETIF_F_CSUM_MASK | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
NETIF_F_GSO | NETIF_F_TSO | NETIF_F_GSO_ROBUST | \
NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_GRO | NETIF_F_RXCSUM | \
NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER)
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 9044b95d2afe..4c0616ba314d 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -1073,6 +1073,7 @@ EXPORT_SYMBOL_GPL(phylink_ethtool_ksettings_get);
int phylink_ethtool_ksettings_set(struct phylink *pl,
const struct ethtool_link_ksettings *kset)
{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(support);
struct ethtool_link_ksettings our_kset;
struct phylink_link_state config;
int ret;
@@ -1083,11 +1084,12 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
kset->base.autoneg != AUTONEG_ENABLE)
return -EINVAL;
+ linkmode_copy(support, pl->supported);
config = pl->link_config;
/* Mask out unsupported advertisements */
linkmode_and(config.advertising, kset->link_modes.advertising,
- pl->supported);
+ support);
/* FIXME: should we reject autoneg if phy/mac does not support it? */
if (kset->base.autoneg == AUTONEG_DISABLE) {
@@ -1097,7 +1099,7 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
* duplex.
*/
s = phy_lookup_setting(kset->base.speed, kset->base.duplex,
- pl->supported, false);
+ support, false);
if (!s)
return -EINVAL;
@@ -1126,7 +1128,7 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
__set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, config.advertising);
}
- if (phylink_validate(pl, pl->supported, &config))
+ if (phylink_validate(pl, support, &config))
return -EINVAL;
/* If autonegotiation is enabled, we must have an advertisement */
@@ -1576,6 +1578,7 @@ static int phylink_sfp_module_insert(void *upstream,
{
struct phylink *pl = upstream;
__ETHTOOL_DECLARE_LINK_MODE_MASK(support) = { 0, };
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(support1);
struct phylink_link_state config;
phy_interface_t iface;
int ret = 0;
@@ -1603,6 +1606,8 @@ static int phylink_sfp_module_insert(void *upstream,
return ret;
}
+ linkmode_copy(support1, support);
+
iface = sfp_select_interface(pl->sfp_bus, id, config.advertising);
if (iface == PHY_INTERFACE_MODE_NA) {
netdev_err(pl->netdev,
@@ -1612,7 +1617,7 @@ static int phylink_sfp_module_insert(void *upstream,
}
config.interface = iface;
- ret = phylink_validate(pl, support, &config);
+ ret = phylink_validate(pl, support1, &config);
if (ret) {
netdev_err(pl->netdev, "validation of %s/%s with support %*pb failed: %d\n",
phylink_an_mode_str(MLO_AN_INBAND),
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index d4635c2178d1..71812be0ac64 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -281,6 +281,7 @@ static int sfp_i2c_read(struct sfp *sfp, bool a2, u8 dev_addr, void *buf,
{
struct i2c_msg msgs[2];
u8 bus_addr = a2 ? 0x51 : 0x50;
+ size_t this_len;
int ret;
msgs[0].addr = bus_addr;
@@ -292,11 +293,26 @@ static int sfp_i2c_read(struct sfp *sfp, bool a2, u8 dev_addr, void *buf,
msgs[1].len = len;
msgs[1].buf = buf;
- ret = i2c_transfer(sfp->i2c, msgs, ARRAY_SIZE(msgs));
- if (ret < 0)
- return ret;
+ while (len) {
+ this_len = len;
+ if (this_len > 16)
+ this_len = 16;
- return ret == ARRAY_SIZE(msgs) ? len : 0;
+ msgs[1].len = this_len;
+
+ ret = i2c_transfer(sfp->i2c, msgs, ARRAY_SIZE(msgs));
+ if (ret < 0)
+ return ret;
+
+ if (ret != ARRAY_SIZE(msgs))
+ break;
+
+ msgs[1].buf += this_len;
+ dev_addr += this_len;
+ len -= this_len;
+ }
+
+ return msgs[1].buf - (u8 *)buf;
}
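The loop bounds every transfer at 16 bytes and advances both the device address and the destination buffer, so modules that NAK long reads are still dumped completely, and the function now returns the number of bytes actually copied. A userspace sketch of the chunking pattern, with read_chunk() standing in for the bounded i2c_transfer() call:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for one bounded transfer; returns bytes read or < 0 on error. */
static int read_chunk(unsigned char dev_addr, void *buf, size_t len)
{
    memset(buf, dev_addr, len);        /* pretend the device answered */
    return (int)len;
}

static int read_all(unsigned char dev_addr, unsigned char *buf, size_t len)
{
    unsigned char *p = buf;

    while (len) {
        size_t this_len = len > 16 ? 16 : len;
        int ret = read_chunk(dev_addr, p, this_len);

        if (ret < 0)
            return ret;
        if ((size_t)ret != this_len)
            break;                     /* short transfer: stop early */
        p += this_len;
        dev_addr += this_len;
        len -= this_len;
    }
    return (int)(p - buf);             /* bytes actually copied */
}

int main(void)
{
    unsigned char buf[40];

    printf("%d\n", read_all(0x50, buf, sizeof(buf))); /* 40 */
    return 0;
}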
static int sfp_i2c_write(struct sfp *sfp, bool a2, u8 dev_addr, void *buf,
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 1b7c2afd84cb..120fb593d1da 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -3400,7 +3400,8 @@ static int nvme_scan_ns_list(struct nvme_ctrl *ctrl, unsigned nn)
{
struct nvme_ns *ns;
__le32 *ns_list;
- unsigned i, j, nsid, prev = 0, num_lists = DIV_ROUND_UP(nn, 1024);
+ unsigned i, j, nsid, prev = 0;
+ unsigned num_lists = DIV_ROUND_UP_ULL((u64)nn, 1024);
int ret = 0;
ns_list = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
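DIV_ROUND_UP(nn, 1024) expands to (nn + 1023) / 1024, so an nn near UINT_MAX wraps in 32-bit arithmetic and the computed list count collapses toward zero; promoting to 64 bits first, as the ULL variant does, avoids the wrap. A quick demonstration (macro definitions simplified from the kernel's):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))
#define DIV_ROUND_UP_ULL(n, d)  DIV_ROUND_UP((unsigned long long)(n), (d))

int main(void)
{
    unsigned int nn = 0xFFFFFFFEu;

    /* 32-bit: nn + 1023 wraps, quotient is bogus (prints 0). */
    printf("32-bit: %u\n", DIV_ROUND_UP(nn, 1024u));
    /* 64-bit: the correct number of 1024-entry lists (4194304). */
    printf("64-bit: %llu\n", DIV_ROUND_UP_ULL(nn, 1024u));
    return 0;
}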
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index f562154551ce..524d6bd6d095 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -2513,6 +2513,12 @@ static void nvme_reset_work(struct work_struct *work)
*/
dev->ctrl.max_hw_sectors = NVME_MAX_KB_SZ << 1;
dev->ctrl.max_segments = NVME_MAX_SEGS;
+
+ /*
+ * Don't limit the IOMMU merged segment size.
+ */
+ dma_set_max_seg_size(dev->dev, 0xffffffff);
+
mutex_unlock(&dev->shutdown_lock);
/*
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index f383146e7d0f..97f668a39ae1 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -213,6 +213,11 @@ static struct nvme_rdma_qe *nvme_rdma_alloc_ring(struct ib_device *ibdev,
if (!ring)
return NULL;
+ /*
+ * Bind the CQEs (post recv buffers) DMA mapping to the RDMA queue
+ * lifetime. It's safe, since any change in the underlying RDMA device
+ * will trigger error recovery and queue re-creation.
+ */
for (i = 0; i < ib_queue_size; i++) {
if (nvme_rdma_alloc_qe(ibdev, &ring[i], capsule_size, dir))
goto out_free_ring;
@@ -274,14 +279,9 @@ static int nvme_rdma_create_qp(struct nvme_rdma_queue *queue, const int factor)
static void nvme_rdma_exit_request(struct blk_mq_tag_set *set,
struct request *rq, unsigned int hctx_idx)
{
- struct nvme_rdma_ctrl *ctrl = set->driver_data;
struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
- int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
- struct nvme_rdma_queue *queue = &ctrl->queues[queue_idx];
- struct nvme_rdma_device *dev = queue->device;
- nvme_rdma_free_qe(dev->dev, &req->sqe, sizeof(struct nvme_command),
- DMA_TO_DEVICE);
+ kfree(req->sqe.data);
}
static int nvme_rdma_init_request(struct blk_mq_tag_set *set,
@@ -292,15 +292,11 @@ static int nvme_rdma_init_request(struct blk_mq_tag_set *set,
struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
struct nvme_rdma_queue *queue = &ctrl->queues[queue_idx];
- struct nvme_rdma_device *dev = queue->device;
- struct ib_device *ibdev = dev->dev;
- int ret;
nvme_req(rq)->ctrl = &ctrl->ctrl;
- ret = nvme_rdma_alloc_qe(ibdev, &req->sqe, sizeof(struct nvme_command),
- DMA_TO_DEVICE);
- if (ret)
- return ret;
+ req->sqe.data = kzalloc(sizeof(struct nvme_command), GFP_KERNEL);
+ if (!req->sqe.data)
+ return -ENOMEM;
req->queue = queue;
@@ -641,34 +637,16 @@ static int nvme_rdma_alloc_io_queues(struct nvme_rdma_ctrl *ctrl)
{
struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
struct ib_device *ibdev = ctrl->device->dev;
- unsigned int nr_io_queues;
+ unsigned int nr_io_queues, nr_default_queues;
+ unsigned int nr_read_queues, nr_poll_queues;
int i, ret;
- nr_io_queues = min(opts->nr_io_queues, num_online_cpus());
-
- /*
- * we map queues according to the device irq vectors for
- * optimal locality so we don't need more queues than
- * completion vectors.
- */
- nr_io_queues = min_t(unsigned int, nr_io_queues,
- ibdev->num_comp_vectors);
-
- if (opts->nr_write_queues) {
- ctrl->io_queues[HCTX_TYPE_DEFAULT] =
- min(opts->nr_write_queues, nr_io_queues);
- nr_io_queues += ctrl->io_queues[HCTX_TYPE_DEFAULT];
- } else {
- ctrl->io_queues[HCTX_TYPE_DEFAULT] = nr_io_queues;
- }
-
- ctrl->io_queues[HCTX_TYPE_READ] = nr_io_queues;
-
- if (opts->nr_poll_queues) {
- ctrl->io_queues[HCTX_TYPE_POLL] =
- min(opts->nr_poll_queues, num_online_cpus());
- nr_io_queues += ctrl->io_queues[HCTX_TYPE_POLL];
- }
+ nr_read_queues = min_t(unsigned int, ibdev->num_comp_vectors,
+ min(opts->nr_io_queues, num_online_cpus()));
+ nr_default_queues = min_t(unsigned int, ibdev->num_comp_vectors,
+ min(opts->nr_write_queues, num_online_cpus()));
+ nr_poll_queues = min(opts->nr_poll_queues, num_online_cpus());
+ nr_io_queues = nr_read_queues + nr_default_queues + nr_poll_queues;
ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
if (ret)
@@ -681,6 +659,34 @@ static int nvme_rdma_alloc_io_queues(struct nvme_rdma_ctrl *ctrl)
dev_info(ctrl->ctrl.device,
"creating %d I/O queues.\n", nr_io_queues);
+ if (opts->nr_write_queues && nr_read_queues < nr_io_queues) {
+ /*
+ * separate read/write queues
+ * hand out dedicated default queues only after we have
+ * sufficient read queues.
+ */
+ ctrl->io_queues[HCTX_TYPE_READ] = nr_read_queues;
+ nr_io_queues -= ctrl->io_queues[HCTX_TYPE_READ];
+ ctrl->io_queues[HCTX_TYPE_DEFAULT] =
+ min(nr_default_queues, nr_io_queues);
+ nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
+ } else {
+ /*
+ * shared read/write queues
+ * either no write queues were requested, or we don't have
+ * sufficient queue count to have dedicated default queues.
+ */
+ ctrl->io_queues[HCTX_TYPE_DEFAULT] =
+ min(nr_read_queues, nr_io_queues);
+ nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
+ }
+
+ if (opts->nr_poll_queues && nr_io_queues) {
+ /* map dedicated poll queues only if we have queues left */
+ ctrl->io_queues[HCTX_TYPE_POLL] =
+ min(nr_poll_queues, nr_io_queues);
+ }
+
for (i = 1; i < ctrl->ctrl.queue_count; i++) {
ret = nvme_rdma_alloc_queue(ctrl, i,
ctrl->ctrl.sqsize + 1);
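The allocation policy reads top to bottom: dedicated read queues are carved out first, default (write) queues only from what remains, and poll queues last, so a controller that grants fewer queues than requested degrades to shared queues instead of failing. A compact sketch of the same accounting, with illustrative names in place of the HCTX_* indices:

#include <stdio.h>

enum { TYPE_DEFAULT, TYPE_READ, TYPE_POLL, TYPE_MAX };

static unsigned int min_u(unsigned int a, unsigned int b)
{
    return a < b ? a : b;
}

/* Split nr_io among read/default/poll queues the way the driver does. */
static void set_io_queues(unsigned int io[TYPE_MAX], unsigned int nr_io,
                          unsigned int want_read, unsigned int want_write,
                          unsigned int want_poll)
{
    if (want_write && want_read < nr_io) {
        /* dedicated read queues first, then default queues */
        io[TYPE_READ] = want_read;
        nr_io -= io[TYPE_READ];
        io[TYPE_DEFAULT] = min_u(want_write, nr_io);
        nr_io -= io[TYPE_DEFAULT];
    } else {
        /* not enough queues: reads and writes share */
        io[TYPE_DEFAULT] = min_u(want_read, nr_io);
        nr_io -= io[TYPE_DEFAULT];
        io[TYPE_READ] = 0;
    }
    /* dedicated poll queues only if anything is left */
    io[TYPE_POLL] = nr_io ? min_u(want_poll, nr_io) : 0;
}

int main(void)
{
    unsigned int io[TYPE_MAX] = {0};

    set_io_queues(io, 8, 4, 2, 2);
    printf("default=%u read=%u poll=%u\n",   /* default=2 read=4 poll=2 */
           io[TYPE_DEFAULT], io[TYPE_READ], io[TYPE_POLL]);
    return 0;
}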
@@ -769,6 +775,11 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
ctrl->max_fr_pages = nvme_rdma_get_max_fr_pages(ctrl->device->dev);
+ /*
+ * Bind the async event SQE DMA mapping to the admin queue lifetime.
+ * It's safe, since any change in the underlying RDMA device will trigger
+ * error recovery and queue re-creation.
+ */
error = nvme_rdma_alloc_qe(ctrl->device->dev, &ctrl->async_event_sqe,
sizeof(struct nvme_command), DMA_TO_DEVICE);
if (error)
@@ -1709,12 +1720,20 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);
dev = queue->device->dev;
+
+ req->sqe.dma = ib_dma_map_single(dev, req->sqe.data,
+ sizeof(struct nvme_command),
+ DMA_TO_DEVICE);
+ err = ib_dma_mapping_error(dev, req->sqe.dma);
+ if (unlikely(err))
+ return BLK_STS_RESOURCE;
+
ib_dma_sync_single_for_cpu(dev, sqe->dma,
sizeof(struct nvme_command), DMA_TO_DEVICE);
ret = nvme_setup_cmd(ns, rq, c);
if (ret)
- return ret;
+ goto unmap_qe;
blk_mq_start_request(rq);
@@ -1739,10 +1758,16 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
}
return BLK_STS_OK;
+
err:
if (err == -ENOMEM || err == -EAGAIN)
- return BLK_STS_RESOURCE;
- return BLK_STS_IOERR;
+ ret = BLK_STS_RESOURCE;
+ else
+ ret = BLK_STS_IOERR;
+unmap_qe:
+ ib_dma_unmap_single(dev, req->sqe.dma, sizeof(struct nvme_command),
+ DMA_TO_DEVICE);
+ return ret;
}
static int nvme_rdma_poll(struct blk_mq_hw_ctx *hctx)
@@ -1755,25 +1780,36 @@ static int nvme_rdma_poll(struct blk_mq_hw_ctx *hctx)
static void nvme_rdma_complete_rq(struct request *rq)
{
struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
+ struct nvme_rdma_queue *queue = req->queue;
+ struct ib_device *ibdev = queue->device->dev;
- nvme_rdma_unmap_data(req->queue, rq);
+ nvme_rdma_unmap_data(queue, rq);
+ ib_dma_unmap_single(ibdev, req->sqe.dma, sizeof(struct nvme_command),
+ DMA_TO_DEVICE);
nvme_complete_rq(rq);
}
static int nvme_rdma_map_queues(struct blk_mq_tag_set *set)
{
struct nvme_rdma_ctrl *ctrl = set->driver_data;
+ struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
- set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
- set->map[HCTX_TYPE_DEFAULT].nr_queues =
- ctrl->io_queues[HCTX_TYPE_DEFAULT];
- set->map[HCTX_TYPE_READ].nr_queues = ctrl->io_queues[HCTX_TYPE_READ];
- if (ctrl->ctrl.opts->nr_write_queues) {
+ if (opts->nr_write_queues && ctrl->io_queues[HCTX_TYPE_READ]) {
/* separate read/write queues */
+ set->map[HCTX_TYPE_DEFAULT].nr_queues =
+ ctrl->io_queues[HCTX_TYPE_DEFAULT];
+ set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
+ set->map[HCTX_TYPE_READ].nr_queues =
+ ctrl->io_queues[HCTX_TYPE_READ];
set->map[HCTX_TYPE_READ].queue_offset =
- ctrl->io_queues[HCTX_TYPE_DEFAULT];
+ ctrl->io_queues[HCTX_TYPE_DEFAULT];
} else {
- /* mixed read/write queues */
+ /* shared read/write queues */
+ set->map[HCTX_TYPE_DEFAULT].nr_queues =
+ ctrl->io_queues[HCTX_TYPE_DEFAULT];
+ set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
+ set->map[HCTX_TYPE_READ].nr_queues =
+ ctrl->io_queues[HCTX_TYPE_DEFAULT];
set->map[HCTX_TYPE_READ].queue_offset = 0;
}
blk_mq_rdma_map_queues(&set->map[HCTX_TYPE_DEFAULT],
@@ -1781,16 +1817,22 @@ static int nvme_rdma_map_queues(struct blk_mq_tag_set *set)
blk_mq_rdma_map_queues(&set->map[HCTX_TYPE_READ],
ctrl->device->dev, 0);
- if (ctrl->ctrl.opts->nr_poll_queues) {
+ if (opts->nr_poll_queues && ctrl->io_queues[HCTX_TYPE_POLL]) {
+ /* map dedicated poll queues only if we have queues left */
set->map[HCTX_TYPE_POLL].nr_queues =
ctrl->io_queues[HCTX_TYPE_POLL];
set->map[HCTX_TYPE_POLL].queue_offset =
- ctrl->io_queues[HCTX_TYPE_DEFAULT];
- if (ctrl->ctrl.opts->nr_write_queues)
- set->map[HCTX_TYPE_POLL].queue_offset +=
- ctrl->io_queues[HCTX_TYPE_READ];
+ ctrl->io_queues[HCTX_TYPE_DEFAULT] +
+ ctrl->io_queues[HCTX_TYPE_READ];
blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]);
}
+
+ dev_info(ctrl->ctrl.device,
+ "mapped %d/%d/%d default/read/poll queues.\n",
+ ctrl->io_queues[HCTX_TYPE_DEFAULT],
+ ctrl->io_queues[HCTX_TYPE_READ],
+ ctrl->io_queues[HCTX_TYPE_POLL]);
+
return 0;
}
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 2b107a1d152b..08a2501b9357 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -111,6 +111,7 @@ struct nvme_tcp_ctrl {
struct work_struct err_work;
struct delayed_work connect_work;
struct nvme_tcp_request async_req;
+ u32 io_queues[HCTX_MAX_TYPES];
};
static LIST_HEAD(nvme_tcp_ctrl_list);
@@ -1564,6 +1565,35 @@ static unsigned int nvme_tcp_nr_io_queues(struct nvme_ctrl *ctrl)
return nr_io_queues;
}
+static void nvme_tcp_set_io_queues(struct nvme_ctrl *nctrl,
+ unsigned int nr_io_queues)
+{
+ struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
+ struct nvmf_ctrl_options *opts = nctrl->opts;
+
+ if (opts->nr_write_queues && opts->nr_io_queues < nr_io_queues) {
+ /*
+ * separate read/write queues
+ * hand out dedicated default queues only after we have
+ * sufficient read queues.
+ */
+ ctrl->io_queues[HCTX_TYPE_READ] = opts->nr_io_queues;
+ nr_io_queues -= ctrl->io_queues[HCTX_TYPE_READ];
+ ctrl->io_queues[HCTX_TYPE_DEFAULT] =
+ min(opts->nr_write_queues, nr_io_queues);
+ nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
+ } else {
+ /*
+ * shared read/write queues
+ * either no write queues were requested, or we don't have
+ * sufficient queue count to have dedicated default queues.
+ */
+ ctrl->io_queues[HCTX_TYPE_DEFAULT] =
+ min(opts->nr_io_queues, nr_io_queues);
+ nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
+ }
+}
+
static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
{
unsigned int nr_io_queues;
@@ -1581,6 +1611,8 @@ static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
dev_info(ctrl->device,
"creating %d I/O queues.\n", nr_io_queues);
+ nvme_tcp_set_io_queues(ctrl, nr_io_queues);
+
return __nvme_tcp_alloc_io_queues(ctrl);
}
@@ -2089,23 +2121,34 @@ static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx,
static int nvme_tcp_map_queues(struct blk_mq_tag_set *set)
{
struct nvme_tcp_ctrl *ctrl = set->driver_data;
+ struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
- set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
- set->map[HCTX_TYPE_READ].nr_queues = ctrl->ctrl.opts->nr_io_queues;
- if (ctrl->ctrl.opts->nr_write_queues) {
+ if (opts->nr_write_queues && ctrl->io_queues[HCTX_TYPE_READ]) {
/* separate read/write queues */
set->map[HCTX_TYPE_DEFAULT].nr_queues =
- ctrl->ctrl.opts->nr_write_queues;
+ ctrl->io_queues[HCTX_TYPE_DEFAULT];
+ set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
+ set->map[HCTX_TYPE_READ].nr_queues =
+ ctrl->io_queues[HCTX_TYPE_READ];
set->map[HCTX_TYPE_READ].queue_offset =
- ctrl->ctrl.opts->nr_write_queues;
+ ctrl->io_queues[HCTX_TYPE_DEFAULT];
} else {
- /* mixed read/write queues */
+ /* shared read/write queues */
set->map[HCTX_TYPE_DEFAULT].nr_queues =
- ctrl->ctrl.opts->nr_io_queues;
+ ctrl->io_queues[HCTX_TYPE_DEFAULT];
+ set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
+ set->map[HCTX_TYPE_READ].nr_queues =
+ ctrl->io_queues[HCTX_TYPE_DEFAULT];
set->map[HCTX_TYPE_READ].queue_offset = 0;
}
blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
blk_mq_map_queues(&set->map[HCTX_TYPE_READ]);
+
+ dev_info(ctrl->ctrl.device,
+ "mapped %d/%d default/read queues.\n",
+ ctrl->io_queues[HCTX_TYPE_DEFAULT],
+ ctrl->io_queues[HCTX_TYPE_READ]);
+
return 0;
}
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index 3efc52f9c309..7a1cf6437a6a 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -293,6 +293,7 @@ u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req)
return 0;
case nvme_cmd_write_zeroes:
req->execute = nvmet_bdev_execute_write_zeroes;
+ req->data_len = 0;
return 0;
default:
pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode,
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
index 121f7603a595..217f15aafa4a 100644
--- a/drivers/parisc/ccio-dma.c
+++ b/drivers/parisc/ccio-dma.c
@@ -562,14 +562,12 @@ ccio_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
/* We currently only support kernel addresses */
BUG_ON(sid != KERNEL_SPACE);
- mtsp(sid,1);
-
/*
** WORD 1 - low order word
** "hints" parm includes the VALID bit!
** "dep" clobbers the physical address offset bits as well.
*/
- pa = virt_to_phys(vba);
+ pa = lpa(vba);
asm volatile("depw %1,31,12,%0" : "+r" (pa) : "r" (hints));
((u32 *)pdir_ptr)[1] = (u32) pa;
@@ -594,7 +592,7 @@ ccio_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
** Grab virtual index [0:11]
** Deposit virt_idx bits into I/O PDIR word
*/
- asm volatile ("lci %%r0(%%sr1, %1), %0" : "=r" (ci) : "r" (vba));
+ asm volatile ("lci %%r0(%1), %0" : "=r" (ci) : "r" (vba));
asm volatile ("extru %1,19,12,%0" : "+r" (ci) : "r" (ci));
asm volatile ("depw %1,15,12,%0" : "+r" (pa) : "r" (ci));
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
index 8a9ea9bd050c..296668caf7e5 100644
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -569,11 +569,10 @@ sba_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
u64 pa; /* physical address */
register unsigned ci; /* coherent index */
- pa = virt_to_phys(vba);
+ pa = lpa(vba);
pa &= IOVP_MASK;
- mtsp(sid,1);
- asm("lci 0(%%sr1, %1), %0" : "=r" (ci) : "r" (vba));
+ asm("lci 0(%1), %0" : "=r" (ci) : "r" (vba));
pa |= (ci >> PAGE_SHIFT) & 0xff; /* move CI (8 bits) into lowest byte */
pa |= SBA_PDIR_VALID_BIT; /* set "valid" bit */
diff --git a/drivers/parport/share.c b/drivers/parport/share.c
index 5dc53d420ca8..7b4ee33c1935 100644
--- a/drivers/parport/share.c
+++ b/drivers/parport/share.c
@@ -895,6 +895,7 @@ parport_register_dev_model(struct parport *port, const char *name,
par_dev->devmodel = true;
ret = device_register(&par_dev->dev);
if (ret) {
+ kfree(par_dev->state);
put_device(&par_dev->dev);
goto err_put_port;
}
@@ -912,6 +913,7 @@ parport_register_dev_model(struct parport *port, const char *name,
spin_unlock(&port->physport->pardevice_lock);
pr_debug("%s: cannot grant exclusive access for device %s\n",
port->name, name);
+ kfree(par_dev->state);
device_unregister(&par_dev->dev);
goto err_put_port;
}
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 009f2c0ec504..b1823d75dd35 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -1274,16 +1274,20 @@ static int qeth_setup_channel(struct qeth_channel *channel, bool alloc_buffers)
return 0;
}
-static void qeth_osa_set_output_queues(struct qeth_card *card, bool single)
+static int qeth_osa_set_output_queues(struct qeth_card *card, bool single)
{
unsigned int count = single ? 1 : card->dev->num_tx_queues;
+ int rc;
rtnl_lock();
- netif_set_real_num_tx_queues(card->dev, count);
+ rc = netif_set_real_num_tx_queues(card->dev, count);
rtnl_unlock();
+ if (rc)
+ return rc;
+
if (card->qdio.no_out_queues == count)
- return;
+ return 0;
if (atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED)
qeth_free_qdio_queues(card);
@@ -1293,12 +1297,14 @@ static void qeth_osa_set_output_queues(struct qeth_card *card, bool single)
card->qdio.default_out_queue = single ? 0 : QETH_DEFAULT_QUEUE;
card->qdio.no_out_queues = count;
+ return 0;
}
static int qeth_update_from_chp_desc(struct qeth_card *card)
{
struct ccw_device *ccwdev;
struct channel_path_desc_fmt0 *chp_dsc;
+ int rc = 0;
QETH_DBF_TEXT(SETUP, 2, "chp_desc");
@@ -1311,12 +1317,12 @@ static int qeth_update_from_chp_desc(struct qeth_card *card)
if (IS_OSD(card) || IS_OSX(card))
/* CHPP field bit 6 == 1 -> single queue */
- qeth_osa_set_output_queues(card, chp_dsc->chpp & 0x02);
+ rc = qeth_osa_set_output_queues(card, chp_dsc->chpp & 0x02);
kfree(chp_dsc);
QETH_DBF_TEXT_(SETUP, 2, "nr:%x", card->qdio.no_out_queues);
QETH_DBF_TEXT_(SETUP, 2, "lvl:%02x", card->info.func_level);
- return 0;
+ return rc;
}
static void qeth_init_qdio_info(struct qeth_card *card)
@@ -5597,8 +5603,12 @@ static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
dev->hw_features |= NETIF_F_SG;
dev->vlan_features |= NETIF_F_SG;
if (IS_IQD(card)) {
- netif_set_real_num_tx_queues(dev, QETH_IQD_MIN_TXQ);
dev->features |= NETIF_F_SG;
+ if (netif_set_real_num_tx_queues(dev,
+ QETH_IQD_MIN_TXQ)) {
+ free_netdev(dev);
+ return NULL;
+ }
}
}
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 218801232ca2..ff8a6cd790b1 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -1680,7 +1680,7 @@ static void qeth_bridgeport_an_set_cb(void *priv,
l2entry = (struct qdio_brinfo_entry_l2 *)entry;
code = IPA_ADDR_CHANGE_CODE_MACADDR;
- if (l2entry->addr_lnid.lnid)
+ if (l2entry->addr_lnid.lnid < VLAN_N_VID)
code |= IPA_ADDR_CHANGE_CODE_VLANID;
qeth_bridge_emit_host_event(card, anev_reg_unreg, code,
(struct net_if_token *)&l2entry->nit,
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 0271833da6a2..13bf3e2e9cea 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -1888,13 +1888,20 @@ static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
static int qeth_l3_get_cast_type(struct sk_buff *skb)
{
+ int ipv = qeth_get_ip_version(skb);
struct neighbour *n = NULL;
struct dst_entry *dst;
rcu_read_lock();
dst = skb_dst(skb);
- if (dst)
- n = dst_neigh_lookup_skb(dst, skb);
+ if (dst) {
+ struct rt6_info *rt = (struct rt6_info *) dst;
+
+ dst = dst_check(dst, (ipv == 6) ? rt6_get_cookie(rt) : 0);
+ if (dst)
+ n = dst_neigh_lookup_skb(dst, skb);
+ }
+
if (n) {
int cast_type = n->type;
@@ -1909,8 +1916,10 @@ static int qeth_l3_get_cast_type(struct sk_buff *skb)
rcu_read_unlock();
/* no neighbour (eg AF_PACKET), fall back to target's IP address ... */
- switch (qeth_get_ip_version(skb)) {
+ switch (ipv) {
case 4:
+ if (ipv4_is_lbcast(ip_hdr(skb)->daddr))
+ return RTN_BROADCAST;
return ipv4_is_multicast(ip_hdr(skb)->daddr) ?
RTN_MULTICAST : RTN_UNICAST;
case 6:
@@ -1940,6 +1949,7 @@ static void qeth_l3_fill_header(struct qeth_qdio_out_q *queue,
struct qeth_hdr_layer3 *l3_hdr = &hdr->hdr.l3;
struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
struct qeth_card *card = queue->card;
+ struct dst_entry *dst;
hdr->hdr.l3.length = data_len;
@@ -1985,15 +1995,27 @@ static void qeth_l3_fill_header(struct qeth_qdio_out_q *queue,
}
rcu_read_lock();
+ dst = skb_dst(skb);
+
if (ipv == 4) {
- struct rtable *rt = skb_rtable(skb);
+ struct rtable *rt;
+
+ if (dst)
+ dst = dst_check(dst, 0);
+ rt = (struct rtable *) dst;
*((__be32 *) &hdr->hdr.l3.next_hop.ipv4.addr) = (rt) ?
rt_nexthop(rt, ip_hdr(skb)->daddr) :
ip_hdr(skb)->daddr;
} else {
/* IPv6 */
- const struct rt6_info *rt = skb_rt6_info(skb);
+ struct rt6_info *rt;
+
+ if (dst) {
+ rt = (struct rt6_info *) dst;
+ dst = dst_check(dst, rt6_get_cookie(rt));
+ }
+ rt = (struct rt6_info *) dst;
if (rt && !ipv6_addr_any(&rt->rt6i_gateway))
l3_hdr->next_hop.ipv6_addr = rt->rt6i_gateway;
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index d6be4e8f4a8f..8fd5ffc55792 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -4046,8 +4046,10 @@ static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
return -ETIMEDOUT;
msecs_blocked =
jiffies_to_msecs(jiffies - start_jiffies);
- if (msecs_blocked >= timeout_msecs)
- return -ETIMEDOUT;
+ if (msecs_blocked >= timeout_msecs) {
+ rc = -ETIMEDOUT;
+ goto out;
+ }
timeout_msecs -= msecs_blocked;
}
}
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 8c1c551f2b42..3fe3029617a8 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -1917,7 +1917,8 @@ int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);
/* Get the descriptor */
- if (lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
+ if (hba->dev_cmd.query.descriptor &&
+ lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
u8 *descp = (u8 *)lrbp->ucd_rsp_ptr +
GENERAL_UPIU_REQUEST_SIZE;
u16 resp_len;
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index df51a35cf537..2d9df786a9d3 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -604,12 +604,6 @@ static size_t init_iov_iter(struct vhost_virtqueue *vq, struct iov_iter *iter,
return iov_iter_count(iter);
}
-static bool vhost_exceeds_weight(int pkts, int total_len)
-{
- return total_len >= VHOST_NET_WEIGHT ||
- pkts >= VHOST_NET_PKT_WEIGHT;
-}
-
static int get_tx_bufs(struct vhost_net *net,
struct vhost_net_virtqueue *nvq,
struct msghdr *msg,
@@ -779,7 +773,7 @@ static void handle_tx_copy(struct vhost_net *net, struct socket *sock)
int sent_pkts = 0;
bool sock_can_batch = (sock->sk->sk_sndbuf == INT_MAX);
- for (;;) {
+ do {
bool busyloop_intr = false;
if (nvq->done_idx == VHOST_NET_BATCH)
@@ -845,11 +839,7 @@ done:
vq->heads[nvq->done_idx].id = cpu_to_vhost32(vq, head);
vq->heads[nvq->done_idx].len = 0;
++nvq->done_idx;
- if (vhost_exceeds_weight(++sent_pkts, total_len)) {
- vhost_poll_queue(&vq->poll);
- break;
- }
- }
+ } while (likely(!vhost_exceeds_weight(vq, ++sent_pkts, total_len)));
vhost_tx_batch(net, nvq, sock, &msg);
}
@@ -874,7 +864,7 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
bool zcopy_used;
int sent_pkts = 0;
- for (;;) {
+ do {
bool busyloop_intr;
/* Release DMAs done buffers first */
@@ -951,11 +941,7 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
else
vhost_zerocopy_signal_used(net, vq);
vhost_net_tx_packet(net);
- if (unlikely(vhost_exceeds_weight(++sent_pkts, total_len))) {
- vhost_poll_queue(&vq->poll);
- break;
- }
- }
+ } while (likely(!vhost_exceeds_weight(vq, ++sent_pkts, total_len)));
}
/* Expects to be always run from workqueue - which acts as
@@ -1153,8 +1139,11 @@ static void handle_rx(struct vhost_net *net)
vq->log : NULL;
mergeable = vhost_has_feature(vq, VIRTIO_NET_F_MRG_RXBUF);
- while ((sock_len = vhost_net_rx_peek_head_len(net, sock->sk,
- &busyloop_intr))) {
+ do {
+ sock_len = vhost_net_rx_peek_head_len(net, sock->sk,
+ &busyloop_intr);
+ if (!sock_len)
+ break;
sock_len += sock_hlen;
vhost_len = sock_len + vhost_hlen;
headcount = get_rx_bufs(vq, vq->heads + nvq->done_idx,
@@ -1239,14 +1228,11 @@ static void handle_rx(struct vhost_net *net)
vhost_log_write(vq, vq_log, log, vhost_len,
vq->iov, in);
total_len += vhost_len;
- if (unlikely(vhost_exceeds_weight(++recv_pkts, total_len))) {
- vhost_poll_queue(&vq->poll);
- goto out;
- }
- }
+ } while (likely(!vhost_exceeds_weight(vq, ++recv_pkts, total_len)));
+
if (unlikely(busyloop_intr))
vhost_poll_queue(&vq->poll);
- else
+ else if (!sock_len)
vhost_net_enable_vq(net, vq);
out:
vhost_net_signal_used(nvq);
@@ -1338,7 +1324,8 @@ static int vhost_net_open(struct inode *inode, struct file *f)
vhost_net_buf_init(&n->vqs[i].rxq);
}
vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX,
- UIO_MAXIOV + VHOST_NET_BATCH);
+ UIO_MAXIOV + VHOST_NET_BATCH,
+ VHOST_NET_PKT_WEIGHT, VHOST_NET_WEIGHT);
vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, EPOLLOUT, dev);
vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, EPOLLIN, dev);
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index c090d177bd75..a9caf1bc3c3e 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -57,6 +57,12 @@
#define VHOST_SCSI_PREALLOC_UPAGES 2048
#define VHOST_SCSI_PREALLOC_PROT_SGLS 2048
+/* Max number of requests before requeueing the job.
+ * Using this limit prevents one virtqueue from starving others with
+ * requests.
+ */
+#define VHOST_SCSI_WEIGHT 256
+
struct vhost_scsi_inflight {
/* Wait for the flush operation to finish */
struct completion comp;
@@ -912,7 +918,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
struct iov_iter in_iter, prot_iter, data_iter;
u64 tag;
u32 exp_data_len, data_direction;
- int ret, prot_bytes;
+ int ret, prot_bytes, c = 0;
u16 lun;
u8 task_attr;
bool t10_pi = vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI);
@@ -932,7 +938,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
vhost_disable_notify(&vs->dev, vq);
- for (;;) {
+ do {
ret = vhost_scsi_get_desc(vs, vq, &vc);
if (ret)
goto err;
@@ -1112,7 +1118,7 @@ err:
break;
else if (ret == -EIO)
vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out);
- }
+ } while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
out:
mutex_unlock(&vq->mutex);
}
@@ -1171,7 +1177,7 @@ vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
} v_req;
struct vhost_scsi_ctx vc;
size_t typ_size;
- int ret;
+ int ret, c = 0;
mutex_lock(&vq->mutex);
/*
@@ -1185,7 +1191,7 @@ vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
vhost_disable_notify(&vs->dev, vq);
- for (;;) {
+ do {
ret = vhost_scsi_get_desc(vs, vq, &vc);
if (ret)
goto err;
@@ -1264,7 +1270,7 @@ err:
break;
else if (ret == -EIO)
vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out);
- }
+ } while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
out:
mutex_unlock(&vq->mutex);
}
@@ -1621,7 +1627,8 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
vqs[i] = &vs->vqs[i].vq;
vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
}
- vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ, UIO_MAXIOV);
+ vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ, UIO_MAXIOV,
+ VHOST_SCSI_WEIGHT, 0);
vhost_scsi_init_inflight(vs, NULL);
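One detail that is easy to miss: unlike net and vsock, vhost-scsi sets no byte budget. An annotated restatement of the vhost_dev_init() call above (the annotations are editorial, not from the patch):

	/* weight: requeue the work item after 256 requests.  A byte_weight
	 * of 0 disables the byte-based check entirely (see the guard in
	 * vhost_exceeds_weight() below), which is also why the scsi loops
	 * pass a total_len of 0.
	 */
	vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ, UIO_MAXIOV,
		       VHOST_SCSI_WEIGHT /* weight */, 0 /* byte_weight */);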
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 1e3ed41ae1f3..3f3eac4bcc58 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -413,8 +413,24 @@ static void vhost_dev_free_iovecs(struct vhost_dev *dev)
vhost_vq_free_iovecs(dev->vqs[i]);
}
+bool vhost_exceeds_weight(struct vhost_virtqueue *vq,
+ int pkts, int total_len)
+{
+ struct vhost_dev *dev = vq->dev;
+
+ if ((dev->byte_weight && total_len >= dev->byte_weight) ||
+ pkts >= dev->weight) {
+ vhost_poll_queue(&vq->poll);
+ return true;
+ }
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(vhost_exceeds_weight);
+
void vhost_dev_init(struct vhost_dev *dev,
- struct vhost_virtqueue **vqs, int nvqs, int iov_limit)
+ struct vhost_virtqueue **vqs, int nvqs,
+ int iov_limit, int weight, int byte_weight)
{
struct vhost_virtqueue *vq;
int i;
@@ -428,6 +444,8 @@ void vhost_dev_init(struct vhost_dev *dev,
dev->mm = NULL;
dev->worker = NULL;
dev->iov_limit = iov_limit;
+ dev->weight = weight;
+ dev->byte_weight = byte_weight;
init_llist_head(&dev->work_list);
init_waitqueue_head(&dev->wait);
INIT_LIST_HEAD(&dev->read_list);
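The new helper is the heart of the series, and its decision logic is small enough to restate in isolation. A standalone model (the vhost_poll_queue() side effect is omitted, and the function name here is editorial):

	#include <stdbool.h>

	/* Model of the test in vhost_exceeds_weight(): trip once either
	 * budget is exhausted; a byte_weight of 0 disables the byte check,
	 * leaving only the packet-count cap.
	 */
	static bool exceeds_weight(int weight, int byte_weight,
				   int pkts, int total_len)
	{
		return (byte_weight && total_len >= byte_weight) ||
		       pkts >= weight;
	}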
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 9490e7ddb340..27a78a9b8cc7 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -171,10 +171,13 @@ struct vhost_dev {
struct list_head pending_list;
wait_queue_head_t wait;
int iov_limit;
+ int weight;
+ int byte_weight;
};
+bool vhost_exceeds_weight(struct vhost_virtqueue *vq, int pkts, int total_len);
void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs,
- int nvqs, int iov_limit);
+ int nvqs, int iov_limit, int weight, int byte_weight);
long vhost_dev_set_owner(struct vhost_dev *dev);
bool vhost_dev_has_owner(struct vhost_dev *dev);
long vhost_dev_check_owner(struct vhost_dev *);
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index bb5fc0e9fbc2..814bed72d793 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -21,6 +21,14 @@
#include "vhost.h"
#define VHOST_VSOCK_DEFAULT_HOST_CID 2
+/* Max number of bytes transferred before requeueing the job.
+ * Using this limit prevents one virtqueue from starving others. */
+#define VHOST_VSOCK_WEIGHT 0x80000
+/* Max number of packets transferred before requeueing the job.
+ * Using this limit prevents one virtqueue from starving others with
+ * small pkts.
+ */
+#define VHOST_VSOCK_PKT_WEIGHT 256
enum {
VHOST_VSOCK_FEATURES = VHOST_FEATURES,
@@ -78,6 +86,7 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
struct vhost_virtqueue *vq)
{
struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
+ int pkts = 0, total_len = 0;
bool added = false;
bool restart_tx = false;
@@ -89,7 +98,7 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
/* Avoid further vmexits, we're already processing the virtqueue */
vhost_disable_notify(&vsock->dev, vq);
- for (;;) {
+ do {
struct virtio_vsock_pkt *pkt;
struct iov_iter iov_iter;
unsigned out, in;
@@ -174,8 +183,9 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
*/
virtio_transport_deliver_tap_pkt(pkt);
+ total_len += pkt->len;
virtio_transport_free_pkt(pkt);
- }
+ } while (likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));
if (added)
vhost_signal(&vsock->dev, vq);
@@ -350,7 +360,7 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
dev);
struct virtio_vsock_pkt *pkt;
- int head;
+ int head, pkts = 0, total_len = 0;
unsigned int out, in;
bool added = false;
@@ -360,7 +370,7 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
goto out;
vhost_disable_notify(&vsock->dev, vq);
- for (;;) {
+ do {
u32 len;
if (!vhost_vsock_more_replies(vsock)) {
@@ -401,9 +411,11 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
else
virtio_transport_free_pkt(pkt);
- vhost_add_used(vq, head, sizeof(pkt->hdr) + len);
+ len += sizeof(pkt->hdr);
+ vhost_add_used(vq, head, len);
+ total_len += len;
added = true;
- }
+ } while (likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));
no_more_replies:
if (added)
@@ -531,7 +543,9 @@ static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
vsock->vqs[VSOCK_VQ_TX].handle_kick = vhost_vsock_handle_tx_kick;
vsock->vqs[VSOCK_VQ_RX].handle_kick = vhost_vsock_handle_rx_kick;
- vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs), UIO_MAXIOV);
+ vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs),
+ UIO_MAXIOV, VHOST_VSOCK_PKT_WEIGHT,
+ VHOST_VSOCK_WEIGHT);
file->private_data = vsock;
spin_lock_init(&vsock->send_pkt_list_lock);
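For a rough sense of the budget these constants set: with 64-byte packets the packet cap trips first (256 * 64 B = 16 KiB, far below the byte limit), while with 64 KiB packets the byte cap trips after eight (8 * 64 KiB = 512 KiB = 0x80000); either way the handler yields after a bounded amount of work instead of monopolizing the vhost worker.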
diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig
index 9aea44ed54c7..023fc3bc01c6 100644
--- a/drivers/virtio/Kconfig
+++ b/drivers/virtio/Kconfig
@@ -63,12 +63,12 @@ config VIRTIO_INPUT
If unsure, say M.
- config VIRTIO_MMIO
+config VIRTIO_MMIO
tristate "Platform bus driver for memory mapped virtio devices"
depends on HAS_IOMEM && HAS_DMA
- select VIRTIO
- ---help---
- This drivers provides support for memory mapped virtio
+ select VIRTIO
+ ---help---
+ This drivers provides support for memory mapped virtio
platform device driver.
If unsure, say N.
diff --git a/drivers/w1/slaves/w1_ds2408.c b/drivers/w1/slaves/w1_ds2408.c
index 92e8f0755b9a..edf0bc98012c 100644
--- a/drivers/w1/slaves/w1_ds2408.c
+++ b/drivers/w1/slaves/w1_ds2408.c
@@ -138,7 +138,7 @@ static ssize_t status_control_read(struct file *filp, struct kobject *kobj,
W1_F29_REG_CONTROL_AND_STATUS, buf);
}
-#ifdef fCONFIG_W1_SLAVE_DS2408_READBACK
+#ifdef CONFIG_W1_SLAVE_DS2408_READBACK
static bool optional_read_back_valid(struct w1_slave *sl, u8 expected)
{
u8 w1_buf[3];