Diffstat (limited to 'drivers/fpga')
-rw-r--r--  drivers/fpga/Kconfig | 16
-rw-r--r--  drivers/fpga/Makefile | 5
-rw-r--r--  drivers/fpga/altera-cvp.c | 23
-rw-r--r--  drivers/fpga/altera-fpga2sdram.c | 12
-rw-r--r--  drivers/fpga/altera-freeze-bridge.c | 15
-rw-r--r--  drivers/fpga/altera-hps2fpga.c | 16
-rw-r--r--  drivers/fpga/altera-pr-ip-core-plat.c | 9
-rw-r--r--  drivers/fpga/altera-ps-spi.c | 47
-rw-r--r--  drivers/fpga/dfl-afu-dma-region.c | 117
-rw-r--r--  drivers/fpga/dfl-afu-error.c | 59
-rw-r--r--  drivers/fpga/dfl-afu-main.c | 292
-rw-r--r--  drivers/fpga/dfl-afu-region.c | 50
-rw-r--r--  drivers/fpga/dfl-afu.h | 31
-rw-r--r--  drivers/fpga/dfl-fme-br.c | 36
-rw-r--r--  drivers/fpga/dfl-fme-error.c | 98
-rw-r--r--  drivers/fpga/dfl-fme-main.c | 130
-rw-r--r--  drivers/fpga/dfl-fme-mgr.c | 4
-rw-r--r--  drivers/fpga/dfl-fme-perf.c | 2
-rw-r--r--  drivers/fpga/dfl-fme-pr.c | 90
-rw-r--r--  drivers/fpga/dfl-fme-pr.h | 2
-rw-r--r--  drivers/fpga/dfl-fme-region.c | 12
-rw-r--r--  drivers/fpga/dfl-fme.h | 2
-rw-r--r--  drivers/fpga/dfl-pci.c | 54
-rw-r--r--  drivers/fpga/dfl.c | 706
-rw-r--r--  drivers/fpga/dfl.h | 182
-rw-r--r--  drivers/fpga/fpga-bridge.c | 125
-rw-r--r--  drivers/fpga/fpga-mgr.c | 103
-rw-r--r--  drivers/fpga/fpga-region.c | 61
-rw-r--r--  drivers/fpga/ice40-spi.c | 8
-rw-r--r--  drivers/fpga/intel-m10-bmc-sec-update.c | 476
-rw-r--r--  drivers/fpga/lattice-sysconfig-spi.c | 1
-rw-r--r--  drivers/fpga/microchip-spi.c | 149
-rw-r--r--  drivers/fpga/of-fpga-region.c | 6
-rw-r--r--  drivers/fpga/socfpga-a10.c | 11
-rw-r--r--  drivers/fpga/socfpga.c | 14
-rw-r--r--  drivers/fpga/stratix10-soc.c | 9
-rw-r--r--  drivers/fpga/tests/.kunitconfig | 5
-rw-r--r--  drivers/fpga/tests/Kconfig | 11
-rw-r--r--  drivers/fpga/tests/Makefile | 6
-rw-r--r--  drivers/fpga/tests/fpga-bridge-test.c | 174
-rw-r--r--  drivers/fpga/tests/fpga-mgr-test.c | 335
-rw-r--r--  drivers/fpga/tests/fpga-region-test.c | 218
-rw-r--r--  drivers/fpga/ts73xx-fpga.c | 4
-rw-r--r--  drivers/fpga/versal-fpga.c | 4
-rw-r--r--  drivers/fpga/xilinx-core.c | 229
-rw-r--r--  drivers/fpga/xilinx-core.h | 27
-rw-r--r--  drivers/fpga/xilinx-pr-decoupler.c | 27
-rw-r--r--  drivers/fpga/xilinx-selectmap.c | 95
-rw-r--r--  drivers/fpga/xilinx-spi.c | 231
-rw-r--r--  drivers/fpga/zynq-fpga.c | 34
-rw-r--r--  drivers/fpga/zynqmp-fpga.c | 21
51 files changed, 2803 insertions(+), 1591 deletions(-)
diff --git a/drivers/fpga/Kconfig b/drivers/fpga/Kconfig
index 6ce143dafd04..37b35f58f0df 100644
--- a/drivers/fpga/Kconfig
+++ b/drivers/fpga/Kconfig
@@ -64,9 +64,21 @@ config FPGA_MGR_STRATIX10_SOC
help
FPGA manager driver support for the Intel Stratix10 SoC.
+config FPGA_MGR_XILINX_CORE
+ tristate
+
+config FPGA_MGR_XILINX_SELECTMAP
+ tristate "Xilinx Configuration over SelectMAP"
+ depends on HAS_IOMEM
+ select FPGA_MGR_XILINX_CORE
+ help
+ FPGA manager driver support for Xilinx FPGA configuration
+ over SelectMAP interface.
+
config FPGA_MGR_XILINX_SPI
tristate "Xilinx Configuration over Slave Serial (SPI)"
depends on SPI
+ select FPGA_MGR_XILINX_CORE
help
FPGA manager driver support for Xilinx FPGA configuration
over slave serial interface.
@@ -246,7 +258,7 @@ config FPGA_MGR_VERSAL_FPGA
config FPGA_M10_BMC_SEC_UPDATE
tristate "Intel MAX10 BMC Secure Update driver"
- depends on MFD_INTEL_M10_BMC
+ depends on MFD_INTEL_M10_BMC_CORE
select FW_LOADER
select FW_UPLOAD
help
@@ -276,4 +288,6 @@ config FPGA_MGR_LATTICE_SYSCONFIG_SPI
FPGA manager driver support for Lattice FPGAs programming over slave
SPI sysCONFIG interface.
+source "drivers/fpga/tests/Kconfig"
+
endif # FPGA
diff --git a/drivers/fpga/Makefile b/drivers/fpga/Makefile
index 72e554b4d2f7..aeb89bb13517 100644
--- a/drivers/fpga/Makefile
+++ b/drivers/fpga/Makefile
@@ -15,6 +15,8 @@ obj-$(CONFIG_FPGA_MGR_SOCFPGA) += socfpga.o
obj-$(CONFIG_FPGA_MGR_SOCFPGA_A10) += socfpga-a10.o
obj-$(CONFIG_FPGA_MGR_STRATIX10_SOC) += stratix10-soc.o
obj-$(CONFIG_FPGA_MGR_TS73XX) += ts73xx-fpga.o
+obj-$(CONFIG_FPGA_MGR_XILINX_CORE) += xilinx-core.o
+obj-$(CONFIG_FPGA_MGR_XILINX_SELECTMAP) += xilinx-selectmap.o
obj-$(CONFIG_FPGA_MGR_XILINX_SPI) += xilinx-spi.o
obj-$(CONFIG_FPGA_MGR_ZYNQ_FPGA) += zynq-fpga.o
obj-$(CONFIG_FPGA_MGR_ZYNQMP_FPGA) += zynqmp-fpga.o
@@ -55,3 +57,6 @@ obj-$(CONFIG_FPGA_DFL_NIOS_INTEL_PAC_N3000) += dfl-n3000-nios.o
# Drivers for FPGAs which implement DFL
obj-$(CONFIG_FPGA_DFL_PCI) += dfl-pci.o
+
+# KUnit tests
+obj-$(CONFIG_FPGA_KUNIT_TESTS) += tests/
diff --git a/drivers/fpga/altera-cvp.c b/drivers/fpga/altera-cvp.c
index 4ffb9da537d8..44badfd11e1b 100644
--- a/drivers/fpga/altera-cvp.c
+++ b/drivers/fpga/altera-cvp.c
@@ -22,9 +22,6 @@
#define TIMEOUT_US 2000 /* CVP STATUS timeout for USERMODE polling */
/* Vendor Specific Extended Capability Registers */
-#define VSE_PCIE_EXT_CAP_ID 0x0
-#define VSE_PCIE_EXT_CAP_ID_VAL 0x000b /* 16bit */
-
#define VSE_CVP_STATUS 0x1c /* 32bit */
#define VSE_CVP_STATUS_CFG_RDY BIT(18) /* CVP_CONFIG_READY */
#define VSE_CVP_STATUS_CFG_ERR BIT(19) /* CVP_CONFIG_ERROR */
@@ -52,7 +49,7 @@
/* V2 Defines */
#define VSE_CVP_TX_CREDITS 0x49 /* 8bit */
-#define V2_CREDIT_TIMEOUT_US 20000
+#define V2_CREDIT_TIMEOUT_US 40000
#define V2_CHECK_CREDIT_US 10
#define V2_POLL_TIMEOUT_US 1000000
#define V2_USER_TIMEOUT_US 500000
@@ -72,7 +69,6 @@ static bool altera_cvp_chkcfg;
struct cvp_priv;
struct altera_cvp_conf {
- struct fpga_manager *mgr;
struct pci_dev *pci_dev;
void __iomem *map;
void (*write_data)(struct altera_cvp_conf *conf,
@@ -578,25 +574,18 @@ static int altera_cvp_probe(struct pci_dev *pdev,
{
struct altera_cvp_conf *conf;
struct fpga_manager *mgr;
- int ret, offset;
- u16 cmd, val;
+ u16 cmd, offset;
u32 regval;
-
- /* Discover the Vendor Specific Offset for this device */
- offset = pci_find_next_ext_capability(pdev, 0, PCI_EXT_CAP_ID_VNDR);
- if (!offset) {
- dev_err(&pdev->dev, "No Vendor Specific Offset.\n");
- return -ENODEV;
- }
+ int ret;
/*
* First check if this is the expected FPGA device. PCI config
* space access works without enabling the PCI device, memory
* space access is enabled further down.
*/
- pci_read_config_word(pdev, offset + VSE_PCIE_EXT_CAP_ID, &val);
- if (val != VSE_PCIE_EXT_CAP_ID_VAL) {
- dev_err(&pdev->dev, "Wrong EXT_CAP_ID value 0x%x\n", val);
+ offset = pci_find_vsec_capability(pdev, PCI_VENDOR_ID_ALTERA, 0x1172);
+ if (!offset) {
+ dev_err(&pdev->dev, "Wrong VSEC ID value\n");
return -ENODEV;
}
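
[Editor's note] The altera-cvp hunk above replaces the open-coded walk over PCI extended capabilities (pci_find_next_ext_capability() plus a manual EXT_CAP_ID check) with a single pci_find_vsec_capability() lookup. A minimal sketch of that pattern follows; the probe function name and the MY_VSEC_ID value are hypothetical, only the PCI helper and PCI_VENDOR_ID_ALTERA are taken as given.

#include <linux/pci.h>

#define MY_VSEC_ID	0x1172		/* hypothetical VSEC ID to match */

static int my_vsec_probe(struct pci_dev *pdev)
{
	u16 offset;

	/*
	 * Returns the config-space offset of the Vendor-Specific Extended
	 * Capability whose header matches both the vendor ID and the VSEC
	 * ID, or 0 if no such capability exists.
	 */
	offset = pci_find_vsec_capability(pdev, PCI_VENDOR_ID_ALTERA, MY_VSEC_ID);
	if (!offset)
		return -ENODEV;

	/* The CVP registers then live at fixed offsets past @offset. */
	return 0;
}
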
diff --git a/drivers/fpga/altera-fpga2sdram.c b/drivers/fpga/altera-fpga2sdram.c
index ff3a646fd9e3..e41492988dd6 100644
--- a/drivers/fpga/altera-fpga2sdram.c
+++ b/drivers/fpga/altera-fpga2sdram.c
@@ -27,7 +27,7 @@
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
#include <linux/regmap.h>
#define ALT_SDR_CTL_FPGAPORTRST_OFST 0x80
@@ -75,12 +75,6 @@ static int alt_fpga2sdram_enable_set(struct fpga_bridge *bridge, bool enable)
return _alt_fpga2sdram_enable_set(bridge->priv, enable);
}
-struct prop_map {
- char *prop_name;
- u32 *prop_value;
- u32 prop_max;
-};
-
static const struct fpga_bridge_ops altera_fpga2sdram_br_ops = {
.enable_set = alt_fpga2sdram_enable_set,
.enable_show = alt_fpga2sdram_enable_show,
@@ -147,13 +141,11 @@ static int alt_fpga_bridge_probe(struct platform_device *pdev)
return ret;
}
-static int alt_fpga_bridge_remove(struct platform_device *pdev)
+static void alt_fpga_bridge_remove(struct platform_device *pdev)
{
struct fpga_bridge *br = platform_get_drvdata(pdev);
fpga_bridge_unregister(br);
-
- return 0;
}
MODULE_DEVICE_TABLE(of, altera_fpga_of_match);
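
[Editor's note] This bridge driver is one of several in the series whose platform .remove() callback is converted from int to void: the driver core never acted on an error returned from remove, so the callback now just unregisters and returns. A condensed sketch of the resulting shape, with hypothetical driver and function names:

#include <linux/platform_device.h>
#include <linux/fpga/fpga-bridge.h>

static int my_bridge_probe(struct platform_device *pdev)
{
	return 0;	/* bridge registration elided in this sketch */
}

static void my_bridge_remove(struct platform_device *pdev)
{
	struct fpga_bridge *br = platform_get_drvdata(pdev);

	/* No return value: there is nothing the core could do with one. */
	fpga_bridge_unregister(br);
}

static struct platform_driver my_bridge_driver = {
	.probe  = my_bridge_probe,
	.remove = my_bridge_remove,
	.driver = {
		.name = "my-fpga-bridge",
	},
};
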
diff --git a/drivers/fpga/altera-freeze-bridge.c b/drivers/fpga/altera-freeze-bridge.c
index 445f4b011167..594693ff786e 100644
--- a/drivers/fpga/altera-freeze-bridge.c
+++ b/drivers/fpga/altera-freeze-bridge.c
@@ -7,8 +7,9 @@
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/kernel.h>
-#include <linux/of_device.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
+#include <linux/platform_device.h>
#include <linux/fpga/fpga-bridge.h>
#define FREEZE_CSR_STATUS_OFFSET 0
@@ -198,13 +199,11 @@ static const struct fpga_bridge_ops altera_freeze_br_br_ops = {
.enable_show = altera_freeze_br_enable_show,
};
-#ifdef CONFIG_OF
static const struct of_device_id altera_freeze_br_of_match[] = {
{ .compatible = "altr,freeze-bridge-controller", },
{},
};
MODULE_DEVICE_TABLE(of, altera_freeze_br_of_match);
-#endif
static int altera_freeze_br_probe(struct platform_device *pdev)
{
@@ -213,14 +212,12 @@ static int altera_freeze_br_probe(struct platform_device *pdev)
void __iomem *base_addr;
struct altera_freeze_br_data *priv;
struct fpga_bridge *br;
- struct resource *res;
u32 status, revision;
if (!np)
return -ENODEV;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base_addr = devm_ioremap_resource(dev, res);
+ base_addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base_addr))
return PTR_ERR(base_addr);
@@ -256,13 +253,11 @@ static int altera_freeze_br_probe(struct platform_device *pdev)
return 0;
}
-static int altera_freeze_br_remove(struct platform_device *pdev)
+static void altera_freeze_br_remove(struct platform_device *pdev)
{
struct fpga_bridge *br = platform_get_drvdata(pdev);
fpga_bridge_unregister(br);
-
- return 0;
}
static struct platform_driver altera_freeze_br_driver = {
@@ -270,7 +265,7 @@ static struct platform_driver altera_freeze_br_driver = {
.remove = altera_freeze_br_remove,
.driver = {
.name = "altera_freeze_br",
- .of_match_table = of_match_ptr(altera_freeze_br_of_match),
+ .of_match_table = altera_freeze_br_of_match,
},
};
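
[Editor's note] Both this driver and altera-pr-ip-core-plat.c below drop the platform_get_resource() + devm_ioremap_resource() pair in favour of devm_platform_ioremap_resource(), which does the lookup and mapping in one call. A minimal probe-side sketch (function name hypothetical):

#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/err.h>

static int my_probe(struct platform_device *pdev)
{
	void __iomem *base;

	/*
	 * Looks up IORESOURCE_MEM index 0 and ioremaps it; the mapping is
	 * released automatically when the device is unbound.
	 */
	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* register access via readl()/writel() on @base from here on */
	return 0;
}
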
diff --git a/drivers/fpga/altera-hps2fpga.c b/drivers/fpga/altera-hps2fpga.c
index aa758426c22b..f2f1250689cb 100644
--- a/drivers/fpga/altera-hps2fpga.c
+++ b/drivers/fpga/altera-hps2fpga.c
@@ -24,7 +24,8 @@
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/spinlock.h>
@@ -127,18 +128,11 @@ static int alt_fpga_bridge_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct altera_hps2fpga_data *priv;
- const struct of_device_id *of_id;
struct fpga_bridge *br;
u32 enable;
int ret;
- of_id = of_match_device(altera_fpga_of_match, dev);
- if (!of_id) {
- dev_err(dev, "failed to match device\n");
- return -ENODEV;
- }
-
- priv = (struct altera_hps2fpga_data *)of_id->data;
+ priv = (struct altera_hps2fpga_data *)device_get_match_data(dev);
priv->bridge_reset = of_reset_control_get_exclusive_by_index(dev->of_node,
0);
@@ -197,7 +191,7 @@ err:
return ret;
}
-static int alt_fpga_bridge_remove(struct platform_device *pdev)
+static void alt_fpga_bridge_remove(struct platform_device *pdev)
{
struct fpga_bridge *bridge = platform_get_drvdata(pdev);
struct altera_hps2fpga_data *priv = bridge->priv;
@@ -205,8 +199,6 @@ static int alt_fpga_bridge_remove(struct platform_device *pdev)
fpga_bridge_unregister(bridge);
clk_disable_unprepare(priv->clk);
-
- return 0;
}
MODULE_DEVICE_TABLE(of, altera_fpga_of_match);
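
[Editor's note] The hps2fpga change swaps of_match_device() for device_get_match_data(), which hands back the .data pointer of whichever match-table entry bound the device and also works for non-OF firmware nodes. A sketch under those assumptions; struct my_priv and the probe function are made up for illustration:

#include <linux/platform_device.h>
#include <linux/property.h>

struct my_priv {
	unsigned int flags;	/* hypothetical per-compatible data */
};

static int my_probe(struct platform_device *pdev)
{
	struct my_priv *priv;

	/* Returns the matched of_device_id's .data (or NULL if none). */
	priv = (struct my_priv *)device_get_match_data(&pdev->dev);
	if (!priv)
		return -ENODEV;

	platform_set_drvdata(pdev, priv);
	return 0;
}
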
diff --git a/drivers/fpga/altera-pr-ip-core-plat.c b/drivers/fpga/altera-pr-ip-core-plat.c
index b008a6b8d2d3..9dc263930007 100644
--- a/drivers/fpga/altera-pr-ip-core-plat.c
+++ b/drivers/fpga/altera-pr-ip-core-plat.c
@@ -9,19 +9,16 @@
*/
#include <linux/fpga/altera-pr-ip-core.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
static int alt_pr_platform_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
void __iomem *reg_base;
- struct resource *res;
/* First mmio base is for register access */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- reg_base = devm_ioremap_resource(dev, res);
-
+ reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(reg_base))
return PTR_ERR(reg_base);
diff --git a/drivers/fpga/altera-ps-spi.c b/drivers/fpga/altera-ps-spi.c
index 5e1e009dba89..d0ec3539b31f 100644
--- a/drivers/fpga/altera-ps-spi.c
+++ b/drivers/fpga/altera-ps-spi.c
@@ -18,8 +18,7 @@
#include <linux/fpga/fpga-mgr.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
-#include <linux/of_gpio.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/spi/spi.h>
#include <linux/sizes.h>
@@ -72,12 +71,6 @@ static struct altera_ps_data a10_data = {
.t_st2ck_us = 10, /* min(t_ST2CK) */
};
-/* Array index is enum altera_ps_devtype */
-static const struct altera_ps_data *altera_ps_data_map[] = {
- &c5_data,
- &a10_data,
-};
-
static const struct of_device_id of_ef_match[] = {
{ .compatible = "altr,fpga-passive-serial", .data = &c5_data },
{ .compatible = "altr,fpga-arria10-passive-serial", .data = &a10_data },
@@ -237,43 +230,16 @@ static const struct fpga_manager_ops altera_ps_ops = {
.write_complete = altera_ps_write_complete,
};
-static const struct altera_ps_data *id_to_data(const struct spi_device_id *id)
-{
- kernel_ulong_t devtype = id->driver_data;
- const struct altera_ps_data *data;
-
- /* someone added a altera_ps_devtype without adding to the map array */
- if (devtype >= ARRAY_SIZE(altera_ps_data_map))
- return NULL;
-
- data = altera_ps_data_map[devtype];
- if (!data || data->devtype != devtype)
- return NULL;
-
- return data;
-}
-
static int altera_ps_probe(struct spi_device *spi)
{
struct altera_ps_conf *conf;
- const struct of_device_id *of_id;
struct fpga_manager *mgr;
conf = devm_kzalloc(&spi->dev, sizeof(*conf), GFP_KERNEL);
if (!conf)
return -ENOMEM;
- if (spi->dev.of_node) {
- of_id = of_match_device(of_ef_match, &spi->dev);
- if (!of_id)
- return -ENODEV;
- conf->data = of_id->data;
- } else {
- conf->data = id_to_data(spi_get_device_id(spi));
- if (!conf->data)
- return -ENODEV;
- }
-
+ conf->data = spi_get_device_match_data(spi);
conf->spi = spi;
conf->config = devm_gpiod_get(&spi->dev, "nconfig", GPIOD_OUT_LOW);
if (IS_ERR(conf->config)) {
@@ -308,9 +274,9 @@ static int altera_ps_probe(struct spi_device *spi)
}
static const struct spi_device_id altera_ps_spi_ids[] = {
- { "cyclone-ps-spi", CYCLONE5 },
- { "fpga-passive-serial", CYCLONE5 },
- { "fpga-arria10-passive-serial", ARRIA10 },
+ { "cyclone-ps-spi", (uintptr_t)&c5_data },
+ { "fpga-passive-serial", (uintptr_t)&c5_data },
+ { "fpga-arria10-passive-serial", (uintptr_t)&a10_data },
{}
};
MODULE_DEVICE_TABLE(spi, altera_ps_spi_ids);
@@ -318,8 +284,7 @@ MODULE_DEVICE_TABLE(spi, altera_ps_spi_ids);
static struct spi_driver altera_ps_driver = {
.driver = {
.name = "altera-ps-spi",
- .owner = THIS_MODULE,
- .of_match_table = of_match_ptr(of_ef_match),
+ .of_match_table = of_ef_match,
},
.id_table = altera_ps_spi_ids,
.probe = altera_ps_probe,
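
[Editor's note] The altera-ps-spi conversion relies on two related ideas: the spi_device_id table now stores a pointer to the per-chip data in driver_data (instead of an enum index), and spi_get_device_match_data() picks the right pointer whether the device matched by OF compatible or by SPI modalias. A compact sketch with hypothetical chip data and table names:

#include <linux/mod_devicetable.h>
#include <linux/of.h>
#include <linux/spi/spi.h>

struct my_cfg {
	unsigned int t_cfg_us;		/* hypothetical per-chip timing */
};

static const struct my_cfg chip_a_cfg = { .t_cfg_us = 2 };

static const struct of_device_id my_of_match[] = {
	{ .compatible = "vendor,chip-a", .data = &chip_a_cfg },
	{}
};

/* driver_data carries the same pointer as the OF table's .data */
static const struct spi_device_id my_spi_ids[] = {
	{ "chip-a", (uintptr_t)&chip_a_cfg },
	{}
};

static int my_probe(struct spi_device *spi)
{
	/* Resolves the per-chip data for both OF and board-info matches. */
	const struct my_cfg *cfg = spi_get_device_match_data(spi);

	if (!cfg)
		return -ENODEV;
	return 0;
}
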
diff --git a/drivers/fpga/dfl-afu-dma-region.c b/drivers/fpga/dfl-afu-dma-region.c
index 02b60fde0430..5aa7b8884374 100644
--- a/drivers/fpga/dfl-afu-dma-region.c
+++ b/drivers/fpga/dfl-afu-dma-region.c
@@ -16,26 +16,26 @@
#include "dfl-afu.h"
-void afu_dma_region_init(struct dfl_feature_platform_data *pdata)
+void afu_dma_region_init(struct dfl_feature_dev_data *fdata)
{
- struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
+ struct dfl_afu *afu = dfl_fpga_fdata_get_private(fdata);
afu->dma_regions = RB_ROOT;
}
/**
* afu_dma_pin_pages - pin pages of given dma memory region
- * @pdata: feature device platform data
+ * @fdata: feature dev data
* @region: dma memory region to be pinned
*
* Pin all the pages of given dfl_afu_dma_region.
* Return 0 for success or negative error code.
*/
-static int afu_dma_pin_pages(struct dfl_feature_platform_data *pdata,
+static int afu_dma_pin_pages(struct dfl_feature_dev_data *fdata,
struct dfl_afu_dma_region *region)
{
int npages = region->length >> PAGE_SHIFT;
- struct device *dev = &pdata->dev->dev;
+ struct device *dev = &fdata->dev->dev;
int ret, pinned;
ret = account_locked_vm(current->mm, npages, true);
@@ -73,17 +73,17 @@ unlock_vm:
/**
* afu_dma_unpin_pages - unpin pages of given dma memory region
- * @pdata: feature device platform data
+ * @fdata: feature dev data
* @region: dma memory region to be unpinned
*
* Unpin all the pages of given dfl_afu_dma_region.
* Return 0 for success or negative error code.
*/
-static void afu_dma_unpin_pages(struct dfl_feature_platform_data *pdata,
+static void afu_dma_unpin_pages(struct dfl_feature_dev_data *fdata,
struct dfl_afu_dma_region *region)
{
long npages = region->length >> PAGE_SHIFT;
- struct device *dev = &pdata->dev->dev;
+ struct device *dev = &fdata->dev->dev;
unpin_user_pages(region->pages, npages);
kfree(region->pages);
@@ -133,20 +133,20 @@ static bool dma_region_check_iova(struct dfl_afu_dma_region *region,
/**
* afu_dma_region_add - add given dma region to rbtree
- * @pdata: feature device platform data
+ * @fdata: feature dev data
* @region: dma region to be added
*
* Return 0 for success, -EEXIST if dma region has already been added.
*
- * Needs to be called with pdata->lock heold.
+ * Needs to be called with fdata->lock held.
*/
-static int afu_dma_region_add(struct dfl_feature_platform_data *pdata,
+static int afu_dma_region_add(struct dfl_feature_dev_data *fdata,
struct dfl_afu_dma_region *region)
{
- struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
+ struct dfl_afu *afu = dfl_fpga_fdata_get_private(fdata);
struct rb_node **new, *parent = NULL;
- dev_dbg(&pdata->dev->dev, "add region (iova = %llx)\n",
+ dev_dbg(&fdata->dev->dev, "add region (iova = %llx)\n",
(unsigned long long)region->iova);
new = &afu->dma_regions.rb_node;
@@ -177,50 +177,50 @@ static int afu_dma_region_add(struct dfl_feature_platform_data *pdata,
/**
* afu_dma_region_remove - remove given dma region from rbtree
- * @pdata: feature device platform data
+ * @fdata: feature dev data
* @region: dma region to be removed
*
- * Needs to be called with pdata->lock heold.
+ * Needs to be called with fdata->lock held.
*/
-static void afu_dma_region_remove(struct dfl_feature_platform_data *pdata,
+static void afu_dma_region_remove(struct dfl_feature_dev_data *fdata,
struct dfl_afu_dma_region *region)
{
struct dfl_afu *afu;
- dev_dbg(&pdata->dev->dev, "del region (iova = %llx)\n",
+ dev_dbg(&fdata->dev->dev, "del region (iova = %llx)\n",
(unsigned long long)region->iova);
- afu = dfl_fpga_pdata_get_private(pdata);
+ afu = dfl_fpga_fdata_get_private(fdata);
rb_erase(&region->node, &afu->dma_regions);
}
/**
* afu_dma_region_destroy - destroy all regions in rbtree
- * @pdata: feature device platform data
+ * @fdata: feature dev data
*
- * Needs to be called with pdata->lock heold.
+ * Needs to be called with fdata->lock held.
*/
-void afu_dma_region_destroy(struct dfl_feature_platform_data *pdata)
+void afu_dma_region_destroy(struct dfl_feature_dev_data *fdata)
{
- struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
+ struct dfl_afu *afu = dfl_fpga_fdata_get_private(fdata);
struct rb_node *node = rb_first(&afu->dma_regions);
struct dfl_afu_dma_region *region;
while (node) {
region = container_of(node, struct dfl_afu_dma_region, node);
- dev_dbg(&pdata->dev->dev, "del region (iova = %llx)\n",
+ dev_dbg(&fdata->dev->dev, "del region (iova = %llx)\n",
(unsigned long long)region->iova);
rb_erase(node, &afu->dma_regions);
if (region->iova)
- dma_unmap_page(dfl_fpga_pdata_to_parent(pdata),
+ dma_unmap_page(dfl_fpga_fdata_to_parent(fdata),
region->iova, region->length,
DMA_BIDIRECTIONAL);
if (region->pages)
- afu_dma_unpin_pages(pdata, region);
+ afu_dma_unpin_pages(fdata, region);
node = rb_next(node);
kfree(region);
@@ -229,7 +229,7 @@ void afu_dma_region_destroy(struct dfl_feature_platform_data *pdata)
/**
* afu_dma_region_find - find the dma region from rbtree based on iova and size
- * @pdata: feature device platform data
+ * @fdata: feature dev data
* @iova: address of the dma memory area
* @size: size of the dma memory area
*
@@ -239,14 +239,14 @@ void afu_dma_region_destroy(struct dfl_feature_platform_data *pdata)
* [@iova, @iova+size)
* If nothing is matched returns NULL.
*
- * Needs to be called with pdata->lock held.
+ * Needs to be called with fdata->lock held.
*/
struct dfl_afu_dma_region *
-afu_dma_region_find(struct dfl_feature_platform_data *pdata, u64 iova, u64 size)
+afu_dma_region_find(struct dfl_feature_dev_data *fdata, u64 iova, u64 size)
{
- struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
+ struct dfl_afu *afu = dfl_fpga_fdata_get_private(fdata);
struct rb_node *node = afu->dma_regions.rb_node;
- struct device *dev = &pdata->dev->dev;
+ struct device *dev = &fdata->dev->dev;
while (node) {
struct dfl_afu_dma_region *region;
@@ -276,20 +276,20 @@ afu_dma_region_find(struct dfl_feature_platform_data *pdata, u64 iova, u64 size)
/**
* afu_dma_region_find_iova - find the dma region from rbtree by iova
- * @pdata: feature device platform data
+ * @fdata: feature dev data
* @iova: address of the dma region
*
- * Needs to be called with pdata->lock held.
+ * Needs to be called with fdata->lock held.
*/
static struct dfl_afu_dma_region *
-afu_dma_region_find_iova(struct dfl_feature_platform_data *pdata, u64 iova)
+afu_dma_region_find_iova(struct dfl_feature_dev_data *fdata, u64 iova)
{
- return afu_dma_region_find(pdata, iova, 0);
+ return afu_dma_region_find(fdata, iova, 0);
}
/**
* afu_dma_map_region - map memory region for dma
- * @pdata: feature device platform data
+ * @fdata: feature dev data
* @user_addr: address of the memory region
* @length: size of the memory region
* @iova: pointer of iova address
@@ -298,9 +298,10 @@ afu_dma_region_find_iova(struct dfl_feature_platform_data *pdata, u64 iova)
* of the memory region via @iova.
* Return 0 for success, otherwise error code.
*/
-int afu_dma_map_region(struct dfl_feature_platform_data *pdata,
+int afu_dma_map_region(struct dfl_feature_dev_data *fdata,
u64 user_addr, u64 length, u64 *iova)
{
+ struct device *dev = &fdata->dev->dev;
struct dfl_afu_dma_region *region;
int ret;
@@ -323,47 +324,47 @@ int afu_dma_map_region(struct dfl_feature_platform_data *pdata,
region->length = length;
/* Pin the user memory region */
- ret = afu_dma_pin_pages(pdata, region);
+ ret = afu_dma_pin_pages(fdata, region);
if (ret) {
- dev_err(&pdata->dev->dev, "failed to pin memory region\n");
+ dev_err(dev, "failed to pin memory region\n");
goto free_region;
}
/* Only accept continuous pages, return error else */
if (!afu_dma_check_continuous_pages(region)) {
- dev_err(&pdata->dev->dev, "pages are not continuous\n");
+ dev_err(dev, "pages are not continuous\n");
ret = -EINVAL;
goto unpin_pages;
}
/* As pages are continuous then start to do DMA mapping */
- region->iova = dma_map_page(dfl_fpga_pdata_to_parent(pdata),
+ region->iova = dma_map_page(dfl_fpga_fdata_to_parent(fdata),
region->pages[0], 0,
region->length,
DMA_BIDIRECTIONAL);
- if (dma_mapping_error(dfl_fpga_pdata_to_parent(pdata), region->iova)) {
- dev_err(&pdata->dev->dev, "failed to map for dma\n");
+ if (dma_mapping_error(dfl_fpga_fdata_to_parent(fdata), region->iova)) {
+ dev_err(dev, "failed to map for dma\n");
ret = -EFAULT;
goto unpin_pages;
}
*iova = region->iova;
- mutex_lock(&pdata->lock);
- ret = afu_dma_region_add(pdata, region);
- mutex_unlock(&pdata->lock);
+ mutex_lock(&fdata->lock);
+ ret = afu_dma_region_add(fdata, region);
+ mutex_unlock(&fdata->lock);
if (ret) {
- dev_err(&pdata->dev->dev, "failed to add dma region\n");
+ dev_err(dev, "failed to add dma region\n");
goto unmap_dma;
}
return 0;
unmap_dma:
- dma_unmap_page(dfl_fpga_pdata_to_parent(pdata),
+ dma_unmap_page(dfl_fpga_fdata_to_parent(fdata),
region->iova, region->length, DMA_BIDIRECTIONAL);
unpin_pages:
- afu_dma_unpin_pages(pdata, region);
+ afu_dma_unpin_pages(fdata, region);
free_region:
kfree(region);
return ret;
@@ -371,34 +372,34 @@ free_region:
/**
* afu_dma_unmap_region - unmap dma memory region
- * @pdata: feature device platform data
+ * @fdata: feature dev data
* @iova: dma address of the region
*
* Unmap dma memory region based on @iova.
* Return 0 for success, otherwise error code.
*/
-int afu_dma_unmap_region(struct dfl_feature_platform_data *pdata, u64 iova)
+int afu_dma_unmap_region(struct dfl_feature_dev_data *fdata, u64 iova)
{
struct dfl_afu_dma_region *region;
- mutex_lock(&pdata->lock);
- region = afu_dma_region_find_iova(pdata, iova);
+ mutex_lock(&fdata->lock);
+ region = afu_dma_region_find_iova(fdata, iova);
if (!region) {
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return -EINVAL;
}
if (region->in_use) {
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return -EBUSY;
}
- afu_dma_region_remove(pdata, region);
- mutex_unlock(&pdata->lock);
+ afu_dma_region_remove(fdata, region);
+ mutex_unlock(&fdata->lock);
- dma_unmap_page(dfl_fpga_pdata_to_parent(pdata),
+ dma_unmap_page(dfl_fpga_fdata_to_parent(fdata),
region->iova, region->length, DMA_BIDIRECTIONAL);
- afu_dma_unpin_pages(pdata, region);
+ afu_dma_unpin_pages(fdata, region);
kfree(region);
return 0;
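
[Editor's note] The DMA-region code above keeps the per-port regions in an rbtree keyed by IOVA; the pdata-to-fdata rename does not change that structure, only which object owns the lock and the tree root. For readers unfamiliar with the kernel rbtree API, here is a stripped-down insert helper in the same style; the node type is made up and the simple equal-key check is a simplification of the driver's real overlap test:

#include <linux/rbtree.h>
#include <linux/types.h>

struct my_dma_region {
	struct rb_node node;
	u64 iova;
	u64 length;
};

static int my_dma_region_add(struct rb_root *root, struct my_dma_region *region)
{
	struct rb_node **new = &root->rb_node, *parent = NULL;

	while (*new) {
		struct my_dma_region *this =
			rb_entry(*new, struct my_dma_region, node);

		parent = *new;
		if (region->iova < this->iova)
			new = &(*new)->rb_left;
		else if (region->iova > this->iova)
			new = &(*new)->rb_right;
		else
			return -EEXIST;		/* key already present */
	}

	/* Link the new node in place, then rebalance/recolour the tree. */
	rb_link_node(&region->node, parent, new);
	rb_insert_color(&region->node, root);
	return 0;
}
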
diff --git a/drivers/fpga/dfl-afu-error.c b/drivers/fpga/dfl-afu-error.c
index ab7be6217368..0f392d1f6d45 100644
--- a/drivers/fpga/dfl-afu-error.c
+++ b/drivers/fpga/dfl-afu-error.c
@@ -28,37 +28,36 @@
#define ERROR_MASK GENMASK_ULL(63, 0)
/* mask or unmask port errors by the error mask register. */
-static void __afu_port_err_mask(struct device *dev, bool mask)
+static void __afu_port_err_mask(struct dfl_feature_dev_data *fdata, bool mask)
{
void __iomem *base;
- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_ERROR);
writeq(mask ? ERROR_MASK : 0, base + PORT_ERROR_MASK);
}
static void afu_port_err_mask(struct device *dev, bool mask)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
- mutex_lock(&pdata->lock);
- __afu_port_err_mask(dev, mask);
- mutex_unlock(&pdata->lock);
+ mutex_lock(&fdata->lock);
+ __afu_port_err_mask(fdata, mask);
+ mutex_unlock(&fdata->lock);
}
/* clear port errors. */
static int afu_port_err_clear(struct device *dev, u64 err)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
- struct platform_device *pdev = to_platform_device(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base_err, *base_hdr;
int enable_ret = 0, ret = -EBUSY;
u64 v;
- base_err = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
- base_hdr = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+ base_err = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_ERROR);
+ base_hdr = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
/*
* clear Port Errors
@@ -80,12 +79,12 @@ static int afu_port_err_clear(struct device *dev, u64 err)
}
/* Halt Port by keeping Port in reset */
- ret = __afu_port_disable(pdev);
+ ret = __afu_port_disable(fdata);
if (ret)
goto done;
/* Mask all errors */
- __afu_port_err_mask(dev, true);
+ __afu_port_err_mask(fdata, true);
/* Clear errors if err input matches with current port errors.*/
v = readq(base_err + PORT_ERROR);
@@ -102,28 +101,28 @@ static int afu_port_err_clear(struct device *dev, u64 err)
}
/* Clear mask */
- __afu_port_err_mask(dev, false);
+ __afu_port_err_mask(fdata, false);
/* Enable the Port by clearing the reset */
- enable_ret = __afu_port_enable(pdev);
+ enable_ret = __afu_port_enable(fdata);
done:
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return enable_ret ? enable_ret : ret;
}
static ssize_t errors_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 error;
- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_ERROR);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
error = readq(base + PORT_ERROR);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return sprintf(buf, "0x%llx\n", (unsigned long long)error);
}
@@ -146,15 +145,15 @@ static DEVICE_ATTR_RW(errors);
static ssize_t first_error_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 error;
- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_ERROR);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
error = readq(base + PORT_FIRST_ERROR);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return sprintf(buf, "0x%llx\n", (unsigned long long)error);
}
@@ -164,16 +163,16 @@ static ssize_t first_malformed_req_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 req0, req1;
- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_ERROR);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
req0 = readq(base + PORT_MALFORMED_REQ0);
req1 = readq(base + PORT_MALFORMED_REQ1);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return sprintf(buf, "0x%016llx%016llx\n",
(unsigned long long)req1, (unsigned long long)req0);
@@ -191,12 +190,14 @@ static umode_t port_err_attrs_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
struct device *dev = kobj_to_dev(kobj);
+ struct dfl_feature_dev_data *fdata;
+ fdata = to_dfl_feature_dev_data(dev);
/*
* sysfs entries are visible only if related private feature is
* enumerated.
*/
- if (!dfl_get_feature_by_id(dev, PORT_FEATURE_ID_ERROR))
+ if (!dfl_get_feature_by_id(fdata, PORT_FEATURE_ID_ERROR))
return 0;
return attr->mode;
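
[Editor's note] The visibility callback above now fetches the feature dev data explicitly before asking whether the private feature was enumerated. The surrounding sysfs wiring is not shown in these hunks, so the sketch below fills it in from the usual attribute_group pattern; my_attrs and the group name are hypothetical, while to_dfl_feature_dev_data() and dfl_get_feature_by_id() are the helpers used in the diff:

#include <linux/device.h>
#include <linux/sysfs.h>
#include "dfl.h"	/* DFL helpers and PORT_FEATURE_ID_* used below */

static struct attribute *my_attrs[] = {
	/* &dev_attr_errors.attr, ... (declared elsewhere in the driver) */
	NULL,
};

static umode_t my_attrs_visible(struct kobject *kobj,
				struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);

	/* Hide the whole group when the private feature wasn't enumerated. */
	if (!dfl_get_feature_by_id(fdata, PORT_FEATURE_ID_ERROR))
		return 0;

	return attr->mode;	/* otherwise keep the declared permissions */
}

static const struct attribute_group my_err_group = {
	.name		= "errors",
	.attrs		= my_attrs,
	.is_visible	= my_attrs_visible,
};
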
diff --git a/drivers/fpga/dfl-afu-main.c b/drivers/fpga/dfl-afu-main.c
index 7f621e96d3b8..3bf8e7338dbe 100644
--- a/drivers/fpga/dfl-afu-main.c
+++ b/drivers/fpga/dfl-afu-main.c
@@ -26,7 +26,7 @@
/**
* __afu_port_enable - enable a port by clear reset
- * @pdev: port platform device.
+ * @fdata: port feature dev data.
*
* Enable Port by clear the port soft reset bit, which is set by default.
* The AFU is unable to respond to any MMIO access while in reset.
@@ -35,18 +35,17 @@
*
* The caller needs to hold lock for protection.
*/
-int __afu_port_enable(struct platform_device *pdev)
+int __afu_port_enable(struct dfl_feature_dev_data *fdata)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
void __iomem *base;
u64 v;
- WARN_ON(!pdata->disable_count);
+ WARN_ON(!fdata->disable_count);
- if (--pdata->disable_count != 0)
+ if (--fdata->disable_count != 0)
return 0;
- base = dfl_get_feature_ioaddr_by_id(&pdev->dev, PORT_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);
/* Clear port soft reset */
v = readq(base + PORT_HDR_CTRL);
@@ -60,7 +59,8 @@ int __afu_port_enable(struct platform_device *pdev)
if (readq_poll_timeout(base + PORT_HDR_CTRL, v,
!(v & PORT_CTRL_SFTRST_ACK),
RST_POLL_INVL, RST_POLL_TIMEOUT)) {
- dev_err(&pdev->dev, "timeout, failure to enable device\n");
+ dev_err(fdata->dfl_cdev->parent,
+ "timeout, failure to enable device\n");
return -ETIMEDOUT;
}
@@ -69,22 +69,21 @@ int __afu_port_enable(struct platform_device *pdev)
/**
* __afu_port_disable - disable a port by hold reset
- * @pdev: port platform device.
+ * @fdata: port feature dev data.
*
* Disable Port by setting the port soft reset bit, it puts the port into reset.
*
* The caller needs to hold lock for protection.
*/
-int __afu_port_disable(struct platform_device *pdev)
+int __afu_port_disable(struct dfl_feature_dev_data *fdata)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
void __iomem *base;
u64 v;
- if (pdata->disable_count++ != 0)
+ if (fdata->disable_count++ != 0)
return 0;
- base = dfl_get_feature_ioaddr_by_id(&pdev->dev, PORT_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);
/* Set port soft reset */
v = readq(base + PORT_HDR_CTRL);
@@ -99,7 +98,8 @@ int __afu_port_disable(struct platform_device *pdev)
if (readq_poll_timeout(base + PORT_HDR_CTRL, v,
v & PORT_CTRL_SFTRST_ACK,
RST_POLL_INVL, RST_POLL_TIMEOUT)) {
- dev_err(&pdev->dev, "timeout, failure to disable device\n");
+ dev_err(fdata->dfl_cdev->parent,
+ "timeout, failure to disable device\n");
return -ETIMEDOUT;
}
@@ -118,34 +118,34 @@ int __afu_port_disable(struct platform_device *pdev)
* (disabled). Any attempts on MMIO access to AFU while in reset, will
* result errors reported via port error reporting sub feature (if present).
*/
-static int __port_reset(struct platform_device *pdev)
+static int __port_reset(struct dfl_feature_dev_data *fdata)
{
int ret;
- ret = __afu_port_disable(pdev);
+ ret = __afu_port_disable(fdata);
if (ret)
return ret;
- return __afu_port_enable(pdev);
+ return __afu_port_enable(fdata);
}
static int port_reset(struct platform_device *pdev)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
int ret;
- mutex_lock(&pdata->lock);
- ret = __port_reset(pdev);
- mutex_unlock(&pdata->lock);
+ mutex_lock(&fdata->lock);
+ ret = __port_reset(fdata);
+ mutex_unlock(&fdata->lock);
return ret;
}
-static int port_get_id(struct platform_device *pdev)
+static int port_get_id(struct dfl_feature_dev_data *fdata)
{
void __iomem *base;
- base = dfl_get_feature_ioaddr_by_id(&pdev->dev, PORT_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);
return FIELD_GET(PORT_CAP_PORT_NUM, readq(base + PORT_HDR_CAP));
}
@@ -153,7 +153,8 @@ static int port_get_id(struct platform_device *pdev)
static ssize_t
id_show(struct device *dev, struct device_attribute *attr, char *buf)
{
- int id = port_get_id(to_platform_device(dev));
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
+ int id = port_get_id(fdata);
return scnprintf(buf, PAGE_SIZE, "%d\n", id);
}
@@ -162,15 +163,15 @@ static DEVICE_ATTR_RO(id);
static ssize_t
ltr_show(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 v;
- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
v = readq(base + PORT_HDR_CTRL);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return sprintf(buf, "%x\n", (u8)FIELD_GET(PORT_CTRL_LATENCY, v));
}
@@ -179,7 +180,7 @@ static ssize_t
ltr_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
bool ltr;
u64 v;
@@ -187,14 +188,14 @@ ltr_store(struct device *dev, struct device_attribute *attr,
if (kstrtobool(buf, &ltr))
return -EINVAL;
- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
v = readq(base + PORT_HDR_CTRL);
v &= ~PORT_CTRL_LATENCY;
v |= FIELD_PREP(PORT_CTRL_LATENCY, ltr ? 1 : 0);
writeq(v, base + PORT_HDR_CTRL);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return count;
}
@@ -203,15 +204,15 @@ static DEVICE_ATTR_RW(ltr);
static ssize_t
ap1_event_show(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 v;
- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
v = readq(base + PORT_HDR_STS);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return sprintf(buf, "%x\n", (u8)FIELD_GET(PORT_STS_AP1_EVT, v));
}
@@ -220,18 +221,18 @@ static ssize_t
ap1_event_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
bool clear;
if (kstrtobool(buf, &clear) || !clear)
return -EINVAL;
- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
writeq(PORT_STS_AP1_EVT, base + PORT_HDR_STS);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return count;
}
@@ -241,15 +242,15 @@ static ssize_t
ap2_event_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 v;
- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
v = readq(base + PORT_HDR_STS);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return sprintf(buf, "%x\n", (u8)FIELD_GET(PORT_STS_AP2_EVT, v));
}
@@ -258,18 +259,18 @@ static ssize_t
ap2_event_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
bool clear;
if (kstrtobool(buf, &clear) || !clear)
return -EINVAL;
- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
writeq(PORT_STS_AP2_EVT, base + PORT_HDR_STS);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return count;
}
@@ -278,15 +279,15 @@ static DEVICE_ATTR_RW(ap2_event);
static ssize_t
power_state_show(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 v;
- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
v = readq(base + PORT_HDR_STS);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return sprintf(buf, "0x%x\n", (u8)FIELD_GET(PORT_STS_PWR_STATE, v));
}
@@ -296,18 +297,18 @@ static ssize_t
userclk_freqcmd_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
u64 userclk_freq_cmd;
void __iomem *base;
if (kstrtou64(buf, 0, &userclk_freq_cmd))
return -EINVAL;
- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
writeq(userclk_freq_cmd, base + PORT_HDR_USRCLK_CMD0);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return count;
}
@@ -317,18 +318,18 @@ static ssize_t
userclk_freqcntrcmd_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
u64 userclk_freqcntr_cmd;
void __iomem *base;
if (kstrtou64(buf, 0, &userclk_freqcntr_cmd))
return -EINVAL;
- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
writeq(userclk_freqcntr_cmd, base + PORT_HDR_USRCLK_CMD1);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return count;
}
@@ -338,15 +339,15 @@ static ssize_t
userclk_freqsts_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
u64 userclk_freqsts;
void __iomem *base;
- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
userclk_freqsts = readq(base + PORT_HDR_USRCLK_STS0);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return sprintf(buf, "0x%llx\n", (unsigned long long)userclk_freqsts);
}
@@ -356,15 +357,15 @@ static ssize_t
userclk_freqcntrsts_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
u64 userclk_freqcntrsts;
void __iomem *base;
- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
userclk_freqcntrsts = readq(base + PORT_HDR_USRCLK_STS1);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return sprintf(buf, "0x%llx\n",
(unsigned long long)userclk_freqcntrsts);
@@ -388,10 +389,12 @@ static umode_t port_hdr_attrs_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
struct device *dev = kobj_to_dev(kobj);
+ struct dfl_feature_dev_data *fdata;
umode_t mode = attr->mode;
void __iomem *base;
- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+ fdata = to_dfl_feature_dev_data(dev);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);
if (dfl_feature_revision(base) > 0) {
/*
@@ -456,21 +459,21 @@ static const struct dfl_feature_ops port_hdr_ops = {
static ssize_t
afu_id_show(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 guidl, guidh;
- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_AFU);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_AFU);
- mutex_lock(&pdata->lock);
- if (pdata->disable_count) {
- mutex_unlock(&pdata->lock);
+ mutex_lock(&fdata->lock);
+ if (fdata->disable_count) {
+ mutex_unlock(&fdata->lock);
return -EBUSY;
}
guidl = readq(base + GUID_L);
guidh = readq(base + GUID_H);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return scnprintf(buf, PAGE_SIZE, "%016llx%016llx\n", guidh, guidl);
}
@@ -485,12 +488,14 @@ static umode_t port_afu_attrs_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
struct device *dev = kobj_to_dev(kobj);
+ struct dfl_feature_dev_data *fdata;
+ fdata = to_dfl_feature_dev_data(dev);
/*
* sysfs entries are visible only if related private feature is
* enumerated.
*/
- if (!dfl_get_feature_by_id(dev, PORT_FEATURE_ID_AFU))
+ if (!dfl_get_feature_by_id(fdata, PORT_FEATURE_ID_AFU))
return 0;
return attr->mode;
@@ -504,9 +509,10 @@ static const struct attribute_group port_afu_group = {
static int port_afu_init(struct platform_device *pdev,
struct dfl_feature *feature)
{
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
struct resource *res = &pdev->resource[feature->resource_index];
- return afu_mmio_region_add(dev_get_platdata(&pdev->dev),
+ return afu_mmio_region_add(fdata,
DFL_PORT_REGION_INDEX_AFU,
resource_size(res), res->start,
DFL_PORT_REGION_MMAP | DFL_PORT_REGION_READ |
@@ -525,9 +531,10 @@ static const struct dfl_feature_ops port_afu_ops = {
static int port_stp_init(struct platform_device *pdev,
struct dfl_feature *feature)
{
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
struct resource *res = &pdev->resource[feature->resource_index];
- return afu_mmio_region_add(dev_get_platdata(&pdev->dev),
+ return afu_mmio_region_add(fdata,
DFL_PORT_REGION_INDEX_STP,
resource_size(res), res->start,
DFL_PORT_REGION_MMAP | DFL_PORT_REGION_READ |
@@ -595,22 +602,18 @@ static struct dfl_feature_driver port_feature_drvs[] = {
static int afu_open(struct inode *inode, struct file *filp)
{
- struct platform_device *fdev = dfl_fpga_inode_to_feature_dev(inode);
- struct dfl_feature_platform_data *pdata;
+ struct dfl_feature_dev_data *fdata = dfl_fpga_inode_to_feature_dev_data(inode);
+ struct platform_device *fdev = fdata->dev;
int ret;
- pdata = dev_get_platdata(&fdev->dev);
- if (WARN_ON(!pdata))
- return -ENODEV;
-
- mutex_lock(&pdata->lock);
- ret = dfl_feature_dev_use_begin(pdata, filp->f_flags & O_EXCL);
+ mutex_lock(&fdata->lock);
+ ret = dfl_feature_dev_use_begin(fdata, filp->f_flags & O_EXCL);
if (!ret) {
dev_dbg(&fdev->dev, "Device File Opened %d Times\n",
- dfl_feature_dev_use_count(pdata));
+ dfl_feature_dev_use_count(fdata));
filp->private_data = fdev;
}
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return ret;
}
@@ -618,29 +621,29 @@ static int afu_open(struct inode *inode, struct file *filp)
static int afu_release(struct inode *inode, struct file *filp)
{
struct platform_device *pdev = filp->private_data;
- struct dfl_feature_platform_data *pdata;
+ struct dfl_feature_dev_data *fdata;
struct dfl_feature *feature;
dev_dbg(&pdev->dev, "Device File Release\n");
- pdata = dev_get_platdata(&pdev->dev);
+ fdata = to_dfl_feature_dev_data(&pdev->dev);
- mutex_lock(&pdata->lock);
- dfl_feature_dev_use_end(pdata);
+ mutex_lock(&fdata->lock);
+ dfl_feature_dev_use_end(fdata);
- if (!dfl_feature_dev_use_count(pdata)) {
- dfl_fpga_dev_for_each_feature(pdata, feature)
+ if (!dfl_feature_dev_use_count(fdata)) {
+ dfl_fpga_dev_for_each_feature(fdata, feature)
dfl_fpga_set_irq_triggers(feature, 0,
feature->nr_irqs, NULL);
- __port_reset(pdev);
- afu_dma_region_destroy(pdata);
+ __port_reset(fdata);
+ afu_dma_region_destroy(fdata);
}
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return 0;
}
-static long afu_ioctl_check_extension(struct dfl_feature_platform_data *pdata,
+static long afu_ioctl_check_extension(struct dfl_feature_dev_data *fdata,
unsigned long arg)
{
/* No extension support for now */
@@ -648,7 +651,7 @@ static long afu_ioctl_check_extension(struct dfl_feature_platform_data *pdata,
}
static long
-afu_ioctl_get_info(struct dfl_feature_platform_data *pdata, void __user *arg)
+afu_ioctl_get_info(struct dfl_feature_dev_data *fdata, void __user *arg)
{
struct dfl_fpga_port_info info;
struct dfl_afu *afu;
@@ -662,12 +665,12 @@ afu_ioctl_get_info(struct dfl_feature_platform_data *pdata, void __user *arg)
if (info.argsz < minsz)
return -EINVAL;
- mutex_lock(&pdata->lock);
- afu = dfl_fpga_pdata_get_private(pdata);
+ mutex_lock(&fdata->lock);
+ afu = dfl_fpga_fdata_get_private(fdata);
info.flags = 0;
info.num_regions = afu->num_regions;
info.num_umsgs = afu->num_umsgs;
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
if (copy_to_user(arg, &info, sizeof(info)))
return -EFAULT;
@@ -675,7 +678,7 @@ afu_ioctl_get_info(struct dfl_feature_platform_data *pdata, void __user *arg)
return 0;
}
-static long afu_ioctl_get_region_info(struct dfl_feature_platform_data *pdata,
+static long afu_ioctl_get_region_info(struct dfl_feature_dev_data *fdata,
void __user *arg)
{
struct dfl_fpga_port_region_info rinfo;
@@ -691,7 +694,7 @@ static long afu_ioctl_get_region_info(struct dfl_feature_platform_data *pdata,
if (rinfo.argsz < minsz || rinfo.padding)
return -EINVAL;
- ret = afu_mmio_region_get_by_index(pdata, rinfo.index, &region);
+ ret = afu_mmio_region_get_by_index(fdata, rinfo.index, &region);
if (ret)
return ret;
@@ -706,7 +709,7 @@ static long afu_ioctl_get_region_info(struct dfl_feature_platform_data *pdata,
}
static long
-afu_ioctl_dma_map(struct dfl_feature_platform_data *pdata, void __user *arg)
+afu_ioctl_dma_map(struct dfl_feature_dev_data *fdata, void __user *arg)
{
struct dfl_fpga_port_dma_map map;
unsigned long minsz;
@@ -720,16 +723,16 @@ afu_ioctl_dma_map(struct dfl_feature_platform_data *pdata, void __user *arg)
if (map.argsz < minsz || map.flags)
return -EINVAL;
- ret = afu_dma_map_region(pdata, map.user_addr, map.length, &map.iova);
+ ret = afu_dma_map_region(fdata, map.user_addr, map.length, &map.iova);
if (ret)
return ret;
if (copy_to_user(arg, &map, sizeof(map))) {
- afu_dma_unmap_region(pdata, map.iova);
+ afu_dma_unmap_region(fdata, map.iova);
return -EFAULT;
}
- dev_dbg(&pdata->dev->dev, "dma map: ua=%llx, len=%llx, iova=%llx\n",
+ dev_dbg(&fdata->dev->dev, "dma map: ua=%llx, len=%llx, iova=%llx\n",
(unsigned long long)map.user_addr,
(unsigned long long)map.length,
(unsigned long long)map.iova);
@@ -738,7 +741,7 @@ afu_ioctl_dma_map(struct dfl_feature_platform_data *pdata, void __user *arg)
}
static long
-afu_ioctl_dma_unmap(struct dfl_feature_platform_data *pdata, void __user *arg)
+afu_ioctl_dma_unmap(struct dfl_feature_dev_data *fdata, void __user *arg)
{
struct dfl_fpga_port_dma_unmap unmap;
unsigned long minsz;
@@ -751,33 +754,33 @@ afu_ioctl_dma_unmap(struct dfl_feature_platform_data *pdata, void __user *arg)
if (unmap.argsz < minsz || unmap.flags)
return -EINVAL;
- return afu_dma_unmap_region(pdata, unmap.iova);
+ return afu_dma_unmap_region(fdata, unmap.iova);
}
static long afu_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct platform_device *pdev = filp->private_data;
- struct dfl_feature_platform_data *pdata;
+ struct dfl_feature_dev_data *fdata;
struct dfl_feature *f;
long ret;
dev_dbg(&pdev->dev, "%s cmd 0x%x\n", __func__, cmd);
- pdata = dev_get_platdata(&pdev->dev);
+ fdata = to_dfl_feature_dev_data(&pdev->dev);
switch (cmd) {
case DFL_FPGA_GET_API_VERSION:
return DFL_FPGA_API_VERSION;
case DFL_FPGA_CHECK_EXTENSION:
- return afu_ioctl_check_extension(pdata, arg);
+ return afu_ioctl_check_extension(fdata, arg);
case DFL_FPGA_PORT_GET_INFO:
- return afu_ioctl_get_info(pdata, (void __user *)arg);
+ return afu_ioctl_get_info(fdata, (void __user *)arg);
case DFL_FPGA_PORT_GET_REGION_INFO:
- return afu_ioctl_get_region_info(pdata, (void __user *)arg);
+ return afu_ioctl_get_region_info(fdata, (void __user *)arg);
case DFL_FPGA_PORT_DMA_MAP:
- return afu_ioctl_dma_map(pdata, (void __user *)arg);
+ return afu_ioctl_dma_map(fdata, (void __user *)arg);
case DFL_FPGA_PORT_DMA_UNMAP:
- return afu_ioctl_dma_unmap(pdata, (void __user *)arg);
+ return afu_ioctl_dma_unmap(fdata, (void __user *)arg);
default:
/*
* Let sub-feature's ioctl function to handle the cmd
@@ -785,7 +788,7 @@ static long afu_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
* handled in this sub feature, and returns 0 and other
* error code if cmd is handled.
*/
- dfl_fpga_dev_for_each_feature(pdata, f)
+ dfl_fpga_dev_for_each_feature(fdata, f)
if (f->ops && f->ops->ioctl) {
ret = f->ops->ioctl(pdev, f, cmd, arg);
if (ret != -ENODEV)
@@ -805,8 +808,8 @@ static const struct vm_operations_struct afu_vma_ops = {
static int afu_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct platform_device *pdev = filp->private_data;
- struct dfl_feature_platform_data *pdata;
u64 size = vma->vm_end - vma->vm_start;
+ struct dfl_feature_dev_data *fdata;
struct dfl_afu_mmio_region region;
u64 offset;
int ret;
@@ -814,10 +817,10 @@ static int afu_mmap(struct file *filp, struct vm_area_struct *vma)
if (!(vma->vm_flags & VM_SHARED))
return -EINVAL;
- pdata = dev_get_platdata(&pdev->dev);
+ fdata = to_dfl_feature_dev_data(&pdev->dev);
offset = vma->vm_pgoff << PAGE_SHIFT;
- ret = afu_mmio_region_get_by_offset(pdata, offset, size, &region);
+ ret = afu_mmio_region_get_by_offset(fdata, offset, size, &region);
if (ret)
return ret;
@@ -851,48 +854,45 @@ static const struct file_operations afu_fops = {
static int afu_dev_init(struct platform_device *pdev)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
struct dfl_afu *afu;
afu = devm_kzalloc(&pdev->dev, sizeof(*afu), GFP_KERNEL);
if (!afu)
return -ENOMEM;
- afu->pdata = pdata;
-
- mutex_lock(&pdata->lock);
- dfl_fpga_pdata_set_private(pdata, afu);
- afu_mmio_region_init(pdata);
- afu_dma_region_init(pdata);
- mutex_unlock(&pdata->lock);
+ mutex_lock(&fdata->lock);
+ dfl_fpga_fdata_set_private(fdata, afu);
+ afu_mmio_region_init(fdata);
+ afu_dma_region_init(fdata);
+ mutex_unlock(&fdata->lock);
return 0;
}
static int afu_dev_destroy(struct platform_device *pdev)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
- mutex_lock(&pdata->lock);
- afu_mmio_region_destroy(pdata);
- afu_dma_region_destroy(pdata);
- dfl_fpga_pdata_set_private(pdata, NULL);
- mutex_unlock(&pdata->lock);
+ mutex_lock(&fdata->lock);
+ afu_mmio_region_destroy(fdata);
+ afu_dma_region_destroy(fdata);
+ dfl_fpga_fdata_set_private(fdata, NULL);
+ mutex_unlock(&fdata->lock);
return 0;
}
-static int port_enable_set(struct platform_device *pdev, bool enable)
+static int port_enable_set(struct dfl_feature_dev_data *fdata, bool enable)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
int ret;
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
if (enable)
- ret = __afu_port_enable(pdev);
+ ret = __afu_port_enable(fdata);
else
- ret = __afu_port_disable(pdev);
- mutex_unlock(&pdata->lock);
+ ret = __afu_port_disable(fdata);
+ mutex_unlock(&fdata->lock);
return ret;
}
@@ -932,15 +932,13 @@ exit:
return ret;
}
-static int afu_remove(struct platform_device *pdev)
+static void afu_remove(struct platform_device *pdev)
{
dev_dbg(&pdev->dev, "%s\n", __func__);
dfl_fpga_dev_ops_unregister(pdev);
dfl_fpga_dev_feature_uinit(pdev);
afu_dev_destroy(pdev);
-
- return 0;
}
static const struct attribute_group *afu_dev_groups[] = {
@@ -951,12 +949,12 @@ static const struct attribute_group *afu_dev_groups[] = {
};
static struct platform_driver afu_driver = {
- .driver = {
- .name = DFL_FPGA_FEATURE_DEV_PORT,
+ .driver = {
+ .name = DFL_FPGA_FEATURE_DEV_PORT,
.dev_groups = afu_dev_groups,
},
- .probe = afu_probe,
- .remove = afu_remove,
+ .probe = afu_probe,
+ .remove = afu_remove,
};
static int __init afu_init(void)
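
[Editor's note] The __afu_port_enable/__afu_port_disable hunks above keep the readq_poll_timeout() wait for the soft-reset ACK bit while switching the error message to the DFL parent device. A self-contained sketch of that poll-for-ack pattern; every register name, bit and timeout constant here is hypothetical, only the iopoll helper mirrors the code above:

#include <linux/bits.h>
#include <linux/iopoll.h>
#include <linux/io-64-nonatomic-lo-hi.h>

#define MY_CTRL			0x38		/* hypothetical control register */
#define MY_CTRL_RST		BIT(0)
#define MY_CTRL_RST_ACK		BIT(4)
#define MY_POLL_INVL_US		10
#define MY_POLL_TIMEOUT_US	1000

static int my_port_disable(void __iomem *base)
{
	u64 v;

	/* Assert the soft reset ... */
	v = readq(base + MY_CTRL);
	v |= MY_CTRL_RST;
	writeq(v, base + MY_CTRL);

	/*
	 * ... then poll the register every MY_POLL_INVL_US microseconds
	 * until the ACK bit is set, giving up after MY_POLL_TIMEOUT_US.
	 * readq_poll_timeout() returns -ETIMEDOUT if the condition never holds.
	 */
	return readq_poll_timeout(base + MY_CTRL, v, v & MY_CTRL_RST_ACK,
				  MY_POLL_INVL_US, MY_POLL_TIMEOUT_US);
}
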
diff --git a/drivers/fpga/dfl-afu-region.c b/drivers/fpga/dfl-afu-region.c
index 0804b7a0c298..b11a5b21e666 100644
--- a/drivers/fpga/dfl-afu-region.c
+++ b/drivers/fpga/dfl-afu-region.c
@@ -12,11 +12,11 @@
/**
* afu_mmio_region_init - init function for afu mmio region support
- * @pdata: afu platform device's pdata.
+ * @fdata: afu feature dev data
*/
-void afu_mmio_region_init(struct dfl_feature_platform_data *pdata)
+void afu_mmio_region_init(struct dfl_feature_dev_data *fdata)
{
- struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
+ struct dfl_afu *afu = dfl_fpga_fdata_get_private(fdata);
INIT_LIST_HEAD(&afu->regions);
}
@@ -39,6 +39,7 @@ static struct dfl_afu_mmio_region *get_region_by_index(struct dfl_afu *afu,
/**
* afu_mmio_region_add - add a mmio region to given feature dev.
*
+ * @fdata: afu feature dev data
* @region_index: region index.
* @region_size: region size.
* @phys: region's physical address of this region.
@@ -46,14 +47,15 @@ static struct dfl_afu_mmio_region *get_region_by_index(struct dfl_afu *afu,
*
* Return: 0 on success, negative error code otherwise.
*/
-int afu_mmio_region_add(struct dfl_feature_platform_data *pdata,
+int afu_mmio_region_add(struct dfl_feature_dev_data *fdata,
u32 region_index, u64 region_size, u64 phys, u32 flags)
{
+ struct device *dev = &fdata->dev->dev;
struct dfl_afu_mmio_region *region;
struct dfl_afu *afu;
int ret = 0;
- region = devm_kzalloc(&pdata->dev->dev, sizeof(*region), GFP_KERNEL);
+ region = devm_kzalloc(dev, sizeof(*region), GFP_KERNEL);
if (!region)
return -ENOMEM;
@@ -62,13 +64,13 @@ int afu_mmio_region_add(struct dfl_feature_platform_data *pdata,
region->phys = phys;
region->flags = flags;
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
- afu = dfl_fpga_pdata_get_private(pdata);
+ afu = dfl_fpga_fdata_get_private(fdata);
/* check if @index already exists */
if (get_region_by_index(afu, region_index)) {
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
ret = -EEXIST;
goto exit;
}
@@ -79,37 +81,37 @@ int afu_mmio_region_add(struct dfl_feature_platform_data *pdata,
afu->region_cur_offset += region_size;
afu->num_regions++;
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return 0;
exit:
- devm_kfree(&pdata->dev->dev, region);
+ devm_kfree(dev, region);
return ret;
}
/**
* afu_mmio_region_destroy - destroy all mmio regions under given feature dev.
- * @pdata: afu platform device's pdata.
+ * @fdata: afu feature dev data
*/
-void afu_mmio_region_destroy(struct dfl_feature_platform_data *pdata)
+void afu_mmio_region_destroy(struct dfl_feature_dev_data *fdata)
{
- struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
+ struct dfl_afu *afu = dfl_fpga_fdata_get_private(fdata);
struct dfl_afu_mmio_region *tmp, *region;
list_for_each_entry_safe(region, tmp, &afu->regions, node)
- devm_kfree(&pdata->dev->dev, region);
+ devm_kfree(&fdata->dev->dev, region);
}
/**
* afu_mmio_region_get_by_index - find an afu region by index.
- * @pdata: afu platform device's pdata.
+ * @fdata: afu feature dev data
* @region_index: region index.
* @pregion: ptr to region for result.
*
* Return: 0 on success, negative error code otherwise.
*/
-int afu_mmio_region_get_by_index(struct dfl_feature_platform_data *pdata,
+int afu_mmio_region_get_by_index(struct dfl_feature_dev_data *fdata,
u32 region_index,
struct dfl_afu_mmio_region *pregion)
{
@@ -117,8 +119,8 @@ int afu_mmio_region_get_by_index(struct dfl_feature_platform_data *pdata,
struct dfl_afu *afu;
int ret = 0;
- mutex_lock(&pdata->lock);
- afu = dfl_fpga_pdata_get_private(pdata);
+ mutex_lock(&fdata->lock);
+ afu = dfl_fpga_fdata_get_private(fdata);
region = get_region_by_index(afu, region_index);
if (!region) {
ret = -EINVAL;
@@ -126,14 +128,14 @@ int afu_mmio_region_get_by_index(struct dfl_feature_platform_data *pdata,
}
*pregion = *region;
exit:
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return ret;
}
/**
* afu_mmio_region_get_by_offset - find an afu mmio region by offset and size
*
- * @pdata: afu platform device's pdata.
+ * @fdata: afu feature dev data
* @offset: region offset from start of the device fd.
* @size: region size.
* @pregion: ptr to region for result.
@@ -143,7 +145,7 @@ exit:
*
* Return: 0 on success, negative error code otherwise.
*/
-int afu_mmio_region_get_by_offset(struct dfl_feature_platform_data *pdata,
+int afu_mmio_region_get_by_offset(struct dfl_feature_dev_data *fdata,
u64 offset, u64 size,
struct dfl_afu_mmio_region *pregion)
{
@@ -151,8 +153,8 @@ int afu_mmio_region_get_by_offset(struct dfl_feature_platform_data *pdata,
struct dfl_afu *afu;
int ret = 0;
- mutex_lock(&pdata->lock);
- afu = dfl_fpga_pdata_get_private(pdata);
+ mutex_lock(&fdata->lock);
+ afu = dfl_fpga_fdata_get_private(fdata);
for_each_region(region, afu)
if (region->offset <= offset &&
region->offset + region->size >= offset + size) {
@@ -161,6 +163,6 @@ int afu_mmio_region_get_by_offset(struct dfl_feature_platform_data *pdata,
}
ret = -EINVAL;
exit:
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return ret;
}
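
For illustration only (the caller and example_* names below are hypothetical): with the conversion above, a user of the region API passes the feature dev data and receives a copy of the matching region, e.g. to validate an mmap window before remapping it:

static int example_check_mmap_window(struct dfl_feature_dev_data *fdata,
                                     u64 offset, u64 size, u64 *phys)
{
        struct dfl_afu_mmio_region region;
        int ret;

        /* fails if no single region covers [offset, offset + size) */
        ret = afu_mmio_region_get_by_offset(fdata, offset, size, &region);
        if (ret)
                return ret;

        /* DFL_PORT_REGION_MMAP comes from <uapi/linux/fpga-dfl.h> */
        if (!(region.flags & DFL_PORT_REGION_MMAP))
                return -EINVAL;

        *phys = region.phys + (offset - region.offset);
        return 0;
}
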
diff --git a/drivers/fpga/dfl-afu.h b/drivers/fpga/dfl-afu.h
index e5020e2b1f3d..03be4f0969c7 100644
--- a/drivers/fpga/dfl-afu.h
+++ b/drivers/fpga/dfl-afu.h
@@ -41,7 +41,7 @@ struct dfl_afu_mmio_region {
};
/**
- * struct fpga_afu_dma_region - afu DMA region data structure
+ * struct dfl_afu_dma_region - afu DMA region data structure
*
* @user_addr: region userspace virtual address.
* @length: region length.
@@ -67,7 +67,6 @@ struct dfl_afu_dma_region {
* @regions: the mmio region linked list of this afu feature device.
* @dma_regions: root of dma regions rb tree.
* @num_umsgs: num of umsgs.
- * @pdata: afu platform device's pdata.
*/
struct dfl_afu {
u64 region_cur_offset;
@@ -75,31 +74,29 @@ struct dfl_afu {
u8 num_umsgs;
struct list_head regions;
struct rb_root dma_regions;
-
- struct dfl_feature_platform_data *pdata;
};
-/* hold pdata->lock when call __afu_port_enable/disable */
-int __afu_port_enable(struct platform_device *pdev);
-int __afu_port_disable(struct platform_device *pdev);
+/* hold fdata->lock when calling __afu_port_enable/disable */
+int __afu_port_enable(struct dfl_feature_dev_data *fdata);
+int __afu_port_disable(struct dfl_feature_dev_data *fdata);
-void afu_mmio_region_init(struct dfl_feature_platform_data *pdata);
-int afu_mmio_region_add(struct dfl_feature_platform_data *pdata,
+void afu_mmio_region_init(struct dfl_feature_dev_data *fdata);
+int afu_mmio_region_add(struct dfl_feature_dev_data *fdata,
u32 region_index, u64 region_size, u64 phys, u32 flags);
-void afu_mmio_region_destroy(struct dfl_feature_platform_data *pdata);
-int afu_mmio_region_get_by_index(struct dfl_feature_platform_data *pdata,
+void afu_mmio_region_destroy(struct dfl_feature_dev_data *fdata);
+int afu_mmio_region_get_by_index(struct dfl_feature_dev_data *fdata,
u32 region_index,
struct dfl_afu_mmio_region *pregion);
-int afu_mmio_region_get_by_offset(struct dfl_feature_platform_data *pdata,
+int afu_mmio_region_get_by_offset(struct dfl_feature_dev_data *fdata,
u64 offset, u64 size,
struct dfl_afu_mmio_region *pregion);
-void afu_dma_region_init(struct dfl_feature_platform_data *pdata);
-void afu_dma_region_destroy(struct dfl_feature_platform_data *pdata);
-int afu_dma_map_region(struct dfl_feature_platform_data *pdata,
+void afu_dma_region_init(struct dfl_feature_dev_data *fdata);
+void afu_dma_region_destroy(struct dfl_feature_dev_data *fdata);
+int afu_dma_map_region(struct dfl_feature_dev_data *fdata,
u64 user_addr, u64 length, u64 *iova);
-int afu_dma_unmap_region(struct dfl_feature_platform_data *pdata, u64 iova);
+int afu_dma_unmap_region(struct dfl_feature_dev_data *fdata, u64 iova);
struct dfl_afu_dma_region *
-afu_dma_region_find(struct dfl_feature_platform_data *pdata,
+afu_dma_region_find(struct dfl_feature_dev_data *fdata,
u64 iova, u64 size);
extern const struct dfl_feature_ops port_err_ops;
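
A short usage sketch for the DMA-region prototypes above (example_* is hypothetical; real callers keep the mapping alive for the duration of the transfer):

static int example_dma_roundtrip(struct dfl_feature_dev_data *fdata,
                                 u64 user_addr, u64 length)
{
        u64 iova;
        int ret;

        ret = afu_dma_map_region(fdata, user_addr, length, &iova);
        if (ret)
                return ret;

        /* ... program the accelerator with 'iova' and run the job ... */

        return afu_dma_unmap_region(fdata, iova);
}
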
diff --git a/drivers/fpga/dfl-fme-br.c b/drivers/fpga/dfl-fme-br.c
index 808d1f4d76df..28b0f9d062ac 100644
--- a/drivers/fpga/dfl-fme-br.c
+++ b/drivers/fpga/dfl-fme-br.c
@@ -22,34 +22,34 @@
struct fme_br_priv {
struct dfl_fme_br_pdata *pdata;
struct dfl_fpga_port_ops *port_ops;
- struct platform_device *port_pdev;
+ struct dfl_feature_dev_data *port_fdata;
};
static int fme_bridge_enable_set(struct fpga_bridge *bridge, bool enable)
{
struct fme_br_priv *priv = bridge->priv;
- struct platform_device *port_pdev;
+ struct dfl_feature_dev_data *port_fdata;
struct dfl_fpga_port_ops *ops;
- if (!priv->port_pdev) {
- port_pdev = dfl_fpga_cdev_find_port(priv->pdata->cdev,
- &priv->pdata->port_id,
- dfl_fpga_check_port_id);
- if (!port_pdev)
+ if (!priv->port_fdata) {
+ port_fdata = dfl_fpga_cdev_find_port_data(priv->pdata->cdev,
+ &priv->pdata->port_id,
+ dfl_fpga_check_port_id);
+ if (!port_fdata)
return -ENODEV;
- priv->port_pdev = port_pdev;
+ priv->port_fdata = port_fdata;
}
- if (priv->port_pdev && !priv->port_ops) {
- ops = dfl_fpga_port_ops_get(priv->port_pdev);
+ if (priv->port_fdata && !priv->port_ops) {
+ ops = dfl_fpga_port_ops_get(priv->port_fdata);
if (!ops || !ops->enable_set)
return -ENOENT;
priv->port_ops = ops;
}
- return priv->port_ops->enable_set(priv->port_pdev, enable);
+ return priv->port_ops->enable_set(priv->port_fdata, enable);
}
static const struct fpga_bridge_ops fme_bridge_ops = {
@@ -78,27 +78,23 @@ static int fme_br_probe(struct platform_device *pdev)
return 0;
}
-static int fme_br_remove(struct platform_device *pdev)
+static void fme_br_remove(struct platform_device *pdev)
{
struct fpga_bridge *br = platform_get_drvdata(pdev);
struct fme_br_priv *priv = br->priv;
fpga_bridge_unregister(br);
- if (priv->port_pdev)
- put_device(&priv->port_pdev->dev);
if (priv->port_ops)
dfl_fpga_port_ops_put(priv->port_ops);
-
- return 0;
}
static struct platform_driver fme_br_driver = {
- .driver = {
- .name = DFL_FPGA_FME_BRIDGE,
+ .driver = {
+ .name = DFL_FPGA_FME_BRIDGE,
},
- .probe = fme_br_probe,
- .remove = fme_br_remove,
+ .probe = fme_br_probe,
+ .remove = fme_br_remove,
};
module_platform_driver(fme_br_driver);
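
Because the bridge now caches a struct dfl_feature_dev_data rather than a reference-counted platform device, the put_device() call disappears from fme_br_remove(). A condensed, illustration-only sketch of the lookup-and-call sequence (without the caching the real driver does):

static int example_port_enable_set(struct dfl_fpga_cdev *cdev, int port_id,
                                   bool enable)
{
        struct dfl_feature_dev_data *fdata;
        struct dfl_fpga_port_ops *ops;
        int ret;

        fdata = dfl_fpga_cdev_find_port_data(cdev, &port_id,
                                             dfl_fpga_check_port_id);
        if (!fdata)
                return -ENODEV;

        ops = dfl_fpga_port_ops_get(fdata);
        if (!ops || !ops->enable_set)
                return -ENOENT;

        ret = ops->enable_set(fdata, enable);
        dfl_fpga_port_ops_put(ops);

        return ret;
}
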
diff --git a/drivers/fpga/dfl-fme-error.c b/drivers/fpga/dfl-fme-error.c
index 51c2892ec06d..f00d949efe69 100644
--- a/drivers/fpga/dfl-fme-error.c
+++ b/drivers/fpga/dfl-fme-error.c
@@ -42,15 +42,15 @@
static ssize_t pcie0_errors_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 value;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
value = readq(base + PCIE0_ERROR);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return sprintf(buf, "0x%llx\n", (unsigned long long)value);
}
@@ -59,7 +59,7 @@ static ssize_t pcie0_errors_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
int ret = 0;
u64 v, val;
@@ -67,9 +67,9 @@ static ssize_t pcie0_errors_store(struct device *dev,
if (kstrtou64(buf, 0, &val))
return -EINVAL;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
writeq(GENMASK_ULL(63, 0), base + PCIE0_ERROR_MASK);
v = readq(base + PCIE0_ERROR);
@@ -79,7 +79,7 @@ static ssize_t pcie0_errors_store(struct device *dev,
ret = -EINVAL;
writeq(0ULL, base + PCIE0_ERROR_MASK);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return ret ? ret : count;
}
static DEVICE_ATTR_RW(pcie0_errors);
@@ -87,15 +87,15 @@ static DEVICE_ATTR_RW(pcie0_errors);
static ssize_t pcie1_errors_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 value;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
value = readq(base + PCIE1_ERROR);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return sprintf(buf, "0x%llx\n", (unsigned long long)value);
}
@@ -104,7 +104,7 @@ static ssize_t pcie1_errors_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
int ret = 0;
u64 v, val;
@@ -112,9 +112,9 @@ static ssize_t pcie1_errors_store(struct device *dev,
if (kstrtou64(buf, 0, &val))
return -EINVAL;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
writeq(GENMASK_ULL(63, 0), base + PCIE1_ERROR_MASK);
v = readq(base + PCIE1_ERROR);
@@ -124,7 +124,7 @@ static ssize_t pcie1_errors_store(struct device *dev,
ret = -EINVAL;
writeq(0ULL, base + PCIE1_ERROR_MASK);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return ret ? ret : count;
}
static DEVICE_ATTR_RW(pcie1_errors);
@@ -132,9 +132,10 @@ static DEVICE_ATTR_RW(pcie1_errors);
static ssize_t nonfatal_errors_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);
return sprintf(buf, "0x%llx\n",
(unsigned long long)readq(base + RAS_NONFAT_ERROR));
@@ -144,9 +145,10 @@ static DEVICE_ATTR_RO(nonfatal_errors);
static ssize_t catfatal_errors_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);
return sprintf(buf, "0x%llx\n",
(unsigned long long)readq(base + RAS_CATFAT_ERROR));
@@ -156,15 +158,15 @@ static DEVICE_ATTR_RO(catfatal_errors);
static ssize_t inject_errors_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 v;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
v = readq(base + RAS_ERROR_INJECT);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return sprintf(buf, "0x%llx\n",
(unsigned long long)FIELD_GET(INJECT_ERROR_MASK, v));
@@ -174,7 +176,7 @@ static ssize_t inject_errors_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u8 inject_error;
u64 v;
@@ -185,14 +187,14 @@ static ssize_t inject_errors_store(struct device *dev,
if (inject_error & ~INJECT_ERROR_MASK)
return -EINVAL;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
v = readq(base + RAS_ERROR_INJECT);
v &= ~INJECT_ERROR_MASK;
v |= FIELD_PREP(INJECT_ERROR_MASK, inject_error);
writeq(v, base + RAS_ERROR_INJECT);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return count;
}
@@ -201,15 +203,15 @@ static DEVICE_ATTR_RW(inject_errors);
static ssize_t fme_errors_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 value;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
value = readq(base + FME_ERROR);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return sprintf(buf, "0x%llx\n", (unsigned long long)value);
}
@@ -218,7 +220,7 @@ static ssize_t fme_errors_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 v, val;
int ret = 0;
@@ -226,9 +228,9 @@ static ssize_t fme_errors_store(struct device *dev,
if (kstrtou64(buf, 0, &val))
return -EINVAL;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
writeq(GENMASK_ULL(63, 0), base + FME_ERROR_MASK);
v = readq(base + FME_ERROR);
@@ -240,7 +242,7 @@ static ssize_t fme_errors_store(struct device *dev,
/* Workaround: disable MBP_ERROR if feature revision is 0 */
writeq(dfl_feature_revision(base) ? 0ULL : MBP_ERROR,
base + FME_ERROR_MASK);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return ret ? ret : count;
}
static DEVICE_ATTR_RW(fme_errors);
@@ -248,15 +250,15 @@ static DEVICE_ATTR_RW(fme_errors);
static ssize_t first_error_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 value;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
value = readq(base + FME_FIRST_ERROR);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return sprintf(buf, "0x%llx\n", (unsigned long long)value);
}
@@ -265,15 +267,15 @@ static DEVICE_ATTR_RO(first_error);
static ssize_t next_error_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 value;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
value = readq(base + FME_NEXT_ERROR);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return sprintf(buf, "0x%llx\n", (unsigned long long)value);
}
@@ -295,12 +297,14 @@ static umode_t fme_global_err_attrs_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
struct device *dev = kobj_to_dev(kobj);
+ struct dfl_feature_dev_data *fdata;
+ fdata = to_dfl_feature_dev_data(dev);
/*
* sysfs entries are visible only if related private feature is
* enumerated.
*/
- if (!dfl_get_feature_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR))
+ if (!dfl_get_feature_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR))
return 0;
return attr->mode;
@@ -314,12 +318,12 @@ const struct attribute_group fme_global_err_group = {
static void fme_err_mask(struct device *dev, bool mask)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
/* Workaround: keep MBP_ERROR always masked if revision is 0 */
if (dfl_feature_revision(base))
@@ -332,7 +336,7 @@ static void fme_err_mask(struct device *dev, bool mask)
writeq(mask ? ERROR_MASK : 0, base + RAS_NONFAT_ERROR_MASK);
writeq(mask ? ERROR_MASK : 0, base + RAS_CATFAT_ERROR_MASK);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
}
static int fme_global_err_init(struct platform_device *pdev,
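
The sysfs handlers in this file all follow the same converted pattern: derive the feature dev data from the device with to_dfl_feature_dev_data() instead of dev_get_platdata(), and serialize register access with fdata->lock. A generic sketch (EXAMPLE_REG is a placeholder offset, not a real register):

#define EXAMPLE_REG     0x40    /* placeholder offset for the sketch */

static ssize_t example_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
        void __iomem *base;
        u64 value;

        base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);

        mutex_lock(&fdata->lock);
        value = readq(base + EXAMPLE_REG);
        mutex_unlock(&fdata->lock);

        return sprintf(buf, "0x%llx\n", (unsigned long long)value);
}
static DEVICE_ATTR_RO(example);
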
diff --git a/drivers/fpga/dfl-fme-main.c b/drivers/fpga/dfl-fme-main.c
index 77ea04d4edbe..8aca2fb20e87 100644
--- a/drivers/fpga/dfl-fme-main.c
+++ b/drivers/fpga/dfl-fme-main.c
@@ -19,6 +19,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/uaccess.h>
+#include <linux/units.h>
#include <linux/fpga-dfl.h>
#include "dfl.h"
@@ -27,10 +28,11 @@
static ssize_t ports_num_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 v;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_HEADER);
v = readq(base + FME_HDR_CAP);
@@ -46,10 +48,11 @@ static DEVICE_ATTR_RO(ports_num);
static ssize_t bitstream_id_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 v;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_HEADER);
v = readq(base + FME_HDR_BITSTREAM_ID);
@@ -64,10 +67,11 @@ static DEVICE_ATTR_RO(bitstream_id);
static ssize_t bitstream_metadata_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 v;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_HEADER);
v = readq(base + FME_HDR_BITSTREAM_MD);
@@ -78,10 +82,11 @@ static DEVICE_ATTR_RO(bitstream_metadata);
static ssize_t cache_size_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 v;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_HEADER);
v = readq(base + FME_HDR_CAP);
@@ -93,10 +98,11 @@ static DEVICE_ATTR_RO(cache_size);
static ssize_t fabric_version_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 v;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_HEADER);
v = readq(base + FME_HDR_CAP);
@@ -108,10 +114,11 @@ static DEVICE_ATTR_RO(fabric_version);
static ssize_t socket_id_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 v;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_HEADER);
v = readq(base + FME_HDR_CAP);
@@ -134,10 +141,10 @@ static const struct attribute_group fme_hdr_group = {
.attrs = fme_hdr_attrs,
};
-static long fme_hdr_ioctl_release_port(struct dfl_feature_platform_data *pdata,
+static long fme_hdr_ioctl_release_port(struct dfl_feature_dev_data *fdata,
unsigned long arg)
{
- struct dfl_fpga_cdev *cdev = pdata->dfl_cdev;
+ struct dfl_fpga_cdev *cdev = fdata->dfl_cdev;
int port_id;
if (get_user(port_id, (int __user *)arg))
@@ -146,10 +153,10 @@ static long fme_hdr_ioctl_release_port(struct dfl_feature_platform_data *pdata,
return dfl_fpga_cdev_release_port(cdev, port_id);
}
-static long fme_hdr_ioctl_assign_port(struct dfl_feature_platform_data *pdata,
+static long fme_hdr_ioctl_assign_port(struct dfl_feature_dev_data *fdata,
unsigned long arg)
{
- struct dfl_fpga_cdev *cdev = pdata->dfl_cdev;
+ struct dfl_fpga_cdev *cdev = fdata->dfl_cdev;
int port_id;
if (get_user(port_id, (int __user *)arg))
@@ -162,13 +169,13 @@ static long fme_hdr_ioctl(struct platform_device *pdev,
struct dfl_feature *feature,
unsigned int cmd, unsigned long arg)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
switch (cmd) {
case DFL_FPGA_FME_PORT_RELEASE:
- return fme_hdr_ioctl_release_port(pdata, arg);
+ return fme_hdr_ioctl_release_port(fdata, arg);
case DFL_FPGA_FME_PORT_ASSIGN:
- return fme_hdr_ioctl_assign_port(pdata, arg);
+ return fme_hdr_ioctl_assign_port(fdata, arg);
}
return -ENODEV;
@@ -231,19 +238,19 @@ static int thermal_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
switch (attr) {
case hwmon_temp_input:
v = readq(feature->ioaddr + FME_THERM_RDSENSOR_FMT1);
- *val = (long)(FIELD_GET(FPGA_TEMPERATURE, v) * 1000);
+ *val = (long)(FIELD_GET(FPGA_TEMPERATURE, v) * MILLI);
break;
case hwmon_temp_max:
v = readq(feature->ioaddr + FME_THERM_THRESHOLD);
- *val = (long)(FIELD_GET(TEMP_THRESHOLD1, v) * 1000);
+ *val = (long)(FIELD_GET(TEMP_THRESHOLD1, v) * MILLI);
break;
case hwmon_temp_crit:
v = readq(feature->ioaddr + FME_THERM_THRESHOLD);
- *val = (long)(FIELD_GET(TEMP_THRESHOLD2, v) * 1000);
+ *val = (long)(FIELD_GET(TEMP_THRESHOLD2, v) * MILLI);
break;
case hwmon_temp_emergency:
v = readq(feature->ioaddr + FME_THERM_THRESHOLD);
- *val = (long)(FIELD_GET(TRIP_THRESHOLD, v) * 1000);
+ *val = (long)(FIELD_GET(TRIP_THRESHOLD, v) * MILLI);
break;
case hwmon_temp_max_alarm:
v = readq(feature->ioaddr + FME_THERM_THRESHOLD);
@@ -265,7 +272,7 @@ static const struct hwmon_ops thermal_hwmon_ops = {
.read = thermal_hwmon_read,
};
-static const struct hwmon_channel_info *thermal_hwmon_info[] = {
+static const struct hwmon_channel_info * const thermal_hwmon_info[] = {
HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_EMERGENCY |
HWMON_T_MAX | HWMON_T_MAX_ALARM |
HWMON_T_CRIT | HWMON_T_CRIT_ALARM),
@@ -382,15 +389,15 @@ static int power_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
switch (attr) {
case hwmon_power_input:
v = readq(feature->ioaddr + FME_PWR_STATUS);
- *val = (long)(FIELD_GET(PWR_CONSUMED, v) * 1000000);
+ *val = (long)(FIELD_GET(PWR_CONSUMED, v) * MICRO);
break;
case hwmon_power_max:
v = readq(feature->ioaddr + FME_PWR_THRESHOLD);
- *val = (long)(FIELD_GET(PWR_THRESHOLD1, v) * 1000000);
+ *val = (long)(FIELD_GET(PWR_THRESHOLD1, v) * MICRO);
break;
case hwmon_power_crit:
v = readq(feature->ioaddr + FME_PWR_THRESHOLD);
- *val = (long)(FIELD_GET(PWR_THRESHOLD2, v) * 1000000);
+ *val = (long)(FIELD_GET(PWR_THRESHOLD2, v) * MICRO);
break;
case hwmon_power_max_alarm:
v = readq(feature->ioaddr + FME_PWR_THRESHOLD);
@@ -410,14 +417,14 @@ static int power_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
static int power_hwmon_write(struct device *dev, enum hwmon_sensor_types type,
u32 attr, int channel, long val)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev->parent);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev->parent);
struct dfl_feature *feature = dev_get_drvdata(dev);
int ret = 0;
u64 v;
- val = clamp_val(val / 1000000, 0, PWR_THRESHOLD_MAX);
+ val = clamp_val(val / MICRO, 0, PWR_THRESHOLD_MAX);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
switch (attr) {
case hwmon_power_max:
@@ -437,7 +444,7 @@ static int power_hwmon_write(struct device *dev, enum hwmon_sensor_types type,
break;
}
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return ret;
}
@@ -465,7 +472,7 @@ static const struct hwmon_ops power_hwmon_ops = {
.write = power_hwmon_write,
};
-static const struct hwmon_channel_info *power_hwmon_info[] = {
+static const struct hwmon_channel_info * const power_hwmon_info[] = {
HWMON_CHANNEL_INFO(power, HWMON_P_INPUT |
HWMON_P_MAX | HWMON_P_MAX_ALARM |
HWMON_P_CRIT | HWMON_P_CRIT_ALARM),
@@ -588,7 +595,7 @@ static struct dfl_feature_driver fme_feature_drvs[] = {
},
};
-static long fme_ioctl_check_extension(struct dfl_feature_platform_data *pdata,
+static long fme_ioctl_check_extension(struct dfl_feature_dev_data *fdata,
unsigned long arg)
{
/* No extension support for now */
@@ -597,49 +604,46 @@ static long fme_ioctl_check_extension(struct dfl_feature_platform_data *pdata,
static int fme_open(struct inode *inode, struct file *filp)
{
- struct platform_device *fdev = dfl_fpga_inode_to_feature_dev(inode);
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&fdev->dev);
+ struct dfl_feature_dev_data *fdata = dfl_fpga_inode_to_feature_dev_data(inode);
+ struct platform_device *fdev = fdata->dev;
int ret;
- if (WARN_ON(!pdata))
- return -ENODEV;
-
- mutex_lock(&pdata->lock);
- ret = dfl_feature_dev_use_begin(pdata, filp->f_flags & O_EXCL);
+ mutex_lock(&fdata->lock);
+ ret = dfl_feature_dev_use_begin(fdata, filp->f_flags & O_EXCL);
if (!ret) {
dev_dbg(&fdev->dev, "Device File Opened %d Times\n",
- dfl_feature_dev_use_count(pdata));
- filp->private_data = pdata;
+ dfl_feature_dev_use_count(fdata));
+ filp->private_data = fdata;
}
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return ret;
}
static int fme_release(struct inode *inode, struct file *filp)
{
- struct dfl_feature_platform_data *pdata = filp->private_data;
- struct platform_device *pdev = pdata->dev;
+ struct dfl_feature_dev_data *fdata = filp->private_data;
+ struct platform_device *pdev = fdata->dev;
struct dfl_feature *feature;
dev_dbg(&pdev->dev, "Device File Release\n");
- mutex_lock(&pdata->lock);
- dfl_feature_dev_use_end(pdata);
+ mutex_lock(&fdata->lock);
+ dfl_feature_dev_use_end(fdata);
- if (!dfl_feature_dev_use_count(pdata))
- dfl_fpga_dev_for_each_feature(pdata, feature)
+ if (!dfl_feature_dev_use_count(fdata))
+ dfl_fpga_dev_for_each_feature(fdata, feature)
dfl_fpga_set_irq_triggers(feature, 0,
feature->nr_irqs, NULL);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return 0;
}
static long fme_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
- struct dfl_feature_platform_data *pdata = filp->private_data;
- struct platform_device *pdev = pdata->dev;
+ struct dfl_feature_dev_data *fdata = filp->private_data;
+ struct platform_device *pdev = fdata->dev;
struct dfl_feature *f;
long ret;
@@ -649,7 +653,7 @@ static long fme_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
case DFL_FPGA_GET_API_VERSION:
return DFL_FPGA_API_VERSION;
case DFL_FPGA_CHECK_EXTENSION:
- return fme_ioctl_check_extension(pdata, arg);
+ return fme_ioctl_check_extension(fdata, arg);
default:
/*
 * Let the sub-feature's ioctl function handle the cmd.
@@ -657,7 +661,7 @@ static long fme_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
* handled in this sub feature, and returns 0 or other
* error code if cmd is handled.
*/
- dfl_fpga_dev_for_each_feature(pdata, f) {
+ dfl_fpga_dev_for_each_feature(fdata, f) {
if (f->ops && f->ops->ioctl) {
ret = f->ops->ioctl(pdev, f, cmd, arg);
if (ret != -ENODEV)
@@ -671,29 +675,27 @@ static long fme_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
static int fme_dev_init(struct platform_device *pdev)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
struct dfl_fme *fme;
fme = devm_kzalloc(&pdev->dev, sizeof(*fme), GFP_KERNEL);
if (!fme)
return -ENOMEM;
- fme->pdata = pdata;
-
- mutex_lock(&pdata->lock);
- dfl_fpga_pdata_set_private(pdata, fme);
- mutex_unlock(&pdata->lock);
+ mutex_lock(&fdata->lock);
+ dfl_fpga_fdata_set_private(fdata, fme);
+ mutex_unlock(&fdata->lock);
return 0;
}
static void fme_dev_destroy(struct platform_device *pdev)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
- mutex_lock(&pdata->lock);
- dfl_fpga_pdata_set_private(pdata, NULL);
- mutex_unlock(&pdata->lock);
+ mutex_lock(&fdata->lock);
+ dfl_fpga_fdata_set_private(fdata, NULL);
+ mutex_unlock(&fdata->lock);
}
static const struct file_operations fme_fops = {
@@ -729,13 +731,11 @@ exit:
return ret;
}
-static int fme_remove(struct platform_device *pdev)
+static void fme_remove(struct platform_device *pdev)
{
dfl_fpga_dev_ops_unregister(pdev);
dfl_fpga_dev_feature_uinit(pdev);
fme_dev_destroy(pdev);
-
- return 0;
}
static const struct attribute_group *fme_dev_groups[] = {
@@ -745,12 +745,12 @@ static const struct attribute_group *fme_dev_groups[] = {
};
static struct platform_driver fme_driver = {
- .driver = {
- .name = DFL_FPGA_FEATURE_DEV_FME,
+ .driver = {
+ .name = DFL_FPGA_FEATURE_DEV_FME,
.dev_groups = fme_dev_groups,
},
- .probe = fme_probe,
- .remove = fme_remove,
+ .probe = fme_probe,
+ .remove = fme_remove,
};
module_platform_driver(fme_driver);
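
The hwmon hunks above replace bare 1000/1000000 scaling factors with MILLI and MICRO from <linux/units.h>: hwmon reports temperature in millidegrees Celsius and power in microwatts. A self-contained sketch of the temperature case (EXAMPLE_TEMP is a placeholder field, not the driver's real mask):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/units.h>

#define EXAMPLE_TEMP    GENMASK_ULL(6, 0)       /* placeholder register field */

/* raw degrees Celsius -> millidegrees, as hwmon_temp_input expects */
static long example_temp_millideg(u64 reg)
{
        return (long)(FIELD_GET(EXAMPLE_TEMP, reg) * MILLI);
}

The power write path above performs the inverse conversion with clamp_val(val / MICRO, 0, PWR_THRESHOLD_MAX).
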
diff --git a/drivers/fpga/dfl-fme-mgr.c b/drivers/fpga/dfl-fme-mgr.c
index af0785783b52..ab228d8837a0 100644
--- a/drivers/fpga/dfl-fme-mgr.c
+++ b/drivers/fpga/dfl-fme-mgr.c
@@ -280,7 +280,6 @@ static int fme_mgr_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct fme_mgr_priv *priv;
struct fpga_manager *mgr;
- struct resource *res;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -290,8 +289,7 @@ static int fme_mgr_probe(struct platform_device *pdev)
priv->ioaddr = pdata->ioaddr;
if (!priv->ioaddr) {
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->ioaddr = devm_ioremap_resource(dev, res);
+ priv->ioaddr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->ioaddr))
return PTR_ERR(priv->ioaddr);
}
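
The mgr probe hunk above folds platform_get_resource() plus devm_ioremap_resource() into a single devm_platform_ioremap_resource() call. The general pattern, sketched with a hypothetical probe:

static int example_mgr_probe(struct platform_device *pdev)
{
        void __iomem *base;

        /* requests, maps and devm-manages MMIO resource index 0 */
        base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(base))
                return PTR_ERR(base);

        /* ... register access via 'base' ... */
        return 0;
}
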
diff --git a/drivers/fpga/dfl-fme-perf.c b/drivers/fpga/dfl-fme-perf.c
index 587c82be12f7..7422d2bc6f37 100644
--- a/drivers/fpga/dfl-fme-perf.c
+++ b/drivers/fpga/dfl-fme-perf.c
@@ -141,7 +141,7 @@
* @fab_port_id: used to indicate current working mode of fabric counters.
* @fab_lock: lock to protect fabric counters working mode.
* @cpu: active CPU to which the PMU is bound for accesses.
- * @cpuhp_node: node for CPU hotplug notifier link.
+ * @node: node for CPU hotplug notifier link.
* @cpuhp_state: state for CPU hotplug notification;
*/
struct fme_perf_priv {
diff --git a/drivers/fpga/dfl-fme-pr.c b/drivers/fpga/dfl-fme-pr.c
index d61ce9a18879..b878b260af38 100644
--- a/drivers/fpga/dfl-fme-pr.c
+++ b/drivers/fpga/dfl-fme-pr.c
@@ -65,7 +65,7 @@ static struct fpga_region *dfl_fme_region_find(struct dfl_fme *fme, int port_id)
static int fme_pr(struct platform_device *pdev, unsigned long arg)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
void __user *argp = (void __user *)arg;
struct dfl_fpga_fme_port_pr port_pr;
struct fpga_image_info *info;
@@ -87,8 +87,7 @@ static int fme_pr(struct platform_device *pdev, unsigned long arg)
return -EINVAL;
/* get fme header region */
- fme_hdr = dfl_get_feature_ioaddr_by_id(&pdev->dev,
- FME_FEATURE_ID_HEADER);
+ fme_hdr = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_HEADER);
/* check port id */
v = readq(fme_hdr + FME_HDR_CAP);
@@ -123,8 +122,8 @@ static int fme_pr(struct platform_device *pdev, unsigned long arg)
info->flags |= FPGA_MGR_PARTIAL_RECONFIG;
- mutex_lock(&pdata->lock);
- fme = dfl_fpga_pdata_get_private(pdata);
+ mutex_lock(&fdata->lock);
+ fme = dfl_fpga_fdata_get_private(fdata);
/* fme device has been unregistered. */
if (!fme) {
ret = -EINVAL;
@@ -156,7 +155,7 @@ static int fme_pr(struct platform_device *pdev, unsigned long arg)
put_device(&region->dev);
unlock_exit:
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
free_exit:
vfree(buf);
return ret;
@@ -164,16 +163,16 @@ free_exit:
/**
* dfl_fme_create_mgr - create fpga mgr platform device as child device
- *
- * @pdata: fme platform_device's pdata
+ * @fdata: fme feature dev data
+ * @feature: sub feature info
*
* Return: mgr platform device if successful, and error code otherwise.
*/
static struct platform_device *
-dfl_fme_create_mgr(struct dfl_feature_platform_data *pdata,
+dfl_fme_create_mgr(struct dfl_feature_dev_data *fdata,
struct dfl_feature *feature)
{
- struct platform_device *mgr, *fme = pdata->dev;
+ struct platform_device *mgr, *fme = fdata->dev;
struct dfl_fme_mgr_pdata mgr_pdata;
int ret = -ENOMEM;
@@ -209,11 +208,11 @@ create_mgr_err:
/**
* dfl_fme_destroy_mgr - destroy fpga mgr platform device
- * @pdata: fme platform device's pdata
+ * @fdata: fme feature dev data
*/
-static void dfl_fme_destroy_mgr(struct dfl_feature_platform_data *pdata)
+static void dfl_fme_destroy_mgr(struct dfl_feature_dev_data *fdata)
{
- struct dfl_fme *priv = dfl_fpga_pdata_get_private(pdata);
+ struct dfl_fme *priv = dfl_fpga_fdata_get_private(fdata);
platform_device_unregister(priv->mgr);
}
@@ -221,15 +220,15 @@ static void dfl_fme_destroy_mgr(struct dfl_feature_platform_data *pdata)
/**
* dfl_fme_create_bridge - create fme fpga bridge platform device as child
*
- * @pdata: fme platform device's pdata
+ * @fdata: fme feature dev data
* @port_id: port id for the bridge to be created.
*
* Return: bridge platform device if successful, and error code otherwise.
*/
static struct dfl_fme_bridge *
-dfl_fme_create_bridge(struct dfl_feature_platform_data *pdata, int port_id)
+dfl_fme_create_bridge(struct dfl_feature_dev_data *fdata, int port_id)
{
- struct device *dev = &pdata->dev->dev;
+ struct device *dev = &fdata->dev->dev;
struct dfl_fme_br_pdata br_pdata;
struct dfl_fme_bridge *fme_br;
int ret = -ENOMEM;
@@ -238,7 +237,7 @@ dfl_fme_create_bridge(struct dfl_feature_platform_data *pdata, int port_id)
if (!fme_br)
return ERR_PTR(ret);
- br_pdata.cdev = pdata->dfl_cdev;
+ br_pdata.cdev = fdata->dfl_cdev;
br_pdata.port_id = port_id;
fme_br->br = platform_device_alloc(DFL_FPGA_FME_BRIDGE,
@@ -273,12 +272,12 @@ static void dfl_fme_destroy_bridge(struct dfl_fme_bridge *fme_br)
}
/**
- * dfl_fme_destroy_bridge - destroy all fpga bridge platform device
- * @pdata: fme platform device's pdata
+ * dfl_fme_destroy_bridges - destroy all fpga bridge platform devices
+ * @fdata: fme feature dev data
*/
-static void dfl_fme_destroy_bridges(struct dfl_feature_platform_data *pdata)
+static void dfl_fme_destroy_bridges(struct dfl_feature_dev_data *fdata)
{
- struct dfl_fme *priv = dfl_fpga_pdata_get_private(pdata);
+ struct dfl_fme *priv = dfl_fpga_fdata_get_private(fdata);
struct dfl_fme_bridge *fbridge, *tmp;
list_for_each_entry_safe(fbridge, tmp, &priv->bridge_list, node) {
@@ -290,7 +289,7 @@ static void dfl_fme_destroy_bridges(struct dfl_feature_platform_data *pdata)
/**
* dfl_fme_create_region - create fpga region platform device as child
*
- * @pdata: fme platform device's pdata
+ * @fdata: fme feature dev data
* @mgr: mgr platform device needed for region
* @br: br platform device needed for region
* @port_id: port id
@@ -298,12 +297,12 @@ static void dfl_fme_destroy_bridges(struct dfl_feature_platform_data *pdata)
* Return: fme region if successful, and error code otherwise.
*/
static struct dfl_fme_region *
-dfl_fme_create_region(struct dfl_feature_platform_data *pdata,
+dfl_fme_create_region(struct dfl_feature_dev_data *fdata,
struct platform_device *mgr,
struct platform_device *br, int port_id)
{
struct dfl_fme_region_pdata region_pdata;
- struct device *dev = &pdata->dev->dev;
+ struct device *dev = &fdata->dev->dev;
struct dfl_fme_region *fme_region;
int ret = -ENOMEM;
@@ -353,11 +352,11 @@ static void dfl_fme_destroy_region(struct dfl_fme_region *fme_region)
/**
* dfl_fme_destroy_regions - destroy all fme regions
- * @pdata: fme platform device's pdata
+ * @fdata: fme feature dev data
*/
-static void dfl_fme_destroy_regions(struct dfl_feature_platform_data *pdata)
+static void dfl_fme_destroy_regions(struct dfl_feature_dev_data *fdata)
{
- struct dfl_fme *priv = dfl_fpga_pdata_get_private(pdata);
+ struct dfl_fme *priv = dfl_fpga_fdata_get_private(fdata);
struct dfl_fme_region *fme_region, *tmp;
list_for_each_entry_safe(fme_region, tmp, &priv->region_list, node) {
@@ -369,7 +368,7 @@ static void dfl_fme_destroy_regions(struct dfl_feature_platform_data *pdata)
static int pr_mgmt_init(struct platform_device *pdev,
struct dfl_feature *feature)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
struct dfl_fme_region *fme_region;
struct dfl_fme_bridge *fme_br;
struct platform_device *mgr;
@@ -378,18 +377,17 @@ static int pr_mgmt_init(struct platform_device *pdev,
int ret = -ENODEV, i = 0;
u64 fme_cap, port_offset;
- fme_hdr = dfl_get_feature_ioaddr_by_id(&pdev->dev,
- FME_FEATURE_ID_HEADER);
+ fme_hdr = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_HEADER);
- mutex_lock(&pdata->lock);
- priv = dfl_fpga_pdata_get_private(pdata);
+ mutex_lock(&fdata->lock);
+ priv = dfl_fpga_fdata_get_private(fdata);
/* Initialize the region and bridge sub device list */
INIT_LIST_HEAD(&priv->region_list);
INIT_LIST_HEAD(&priv->bridge_list);
/* Create fpga mgr platform device */
- mgr = dfl_fme_create_mgr(pdata, feature);
+ mgr = dfl_fme_create_mgr(fdata, feature);
if (IS_ERR(mgr)) {
dev_err(&pdev->dev, "fail to create fpga mgr pdev\n");
goto unlock;
@@ -405,7 +403,7 @@ static int pr_mgmt_init(struct platform_device *pdev,
continue;
/* Create bridge for each port */
- fme_br = dfl_fme_create_bridge(pdata, i);
+ fme_br = dfl_fme_create_bridge(fdata, i);
if (IS_ERR(fme_br)) {
ret = PTR_ERR(fme_br);
goto destroy_region;
@@ -414,7 +412,7 @@ static int pr_mgmt_init(struct platform_device *pdev,
list_add(&fme_br->node, &priv->bridge_list);
/* Create region for each port */
- fme_region = dfl_fme_create_region(pdata, mgr,
+ fme_region = dfl_fme_create_region(fdata, mgr,
fme_br->br, i);
if (IS_ERR(fme_region)) {
ret = PTR_ERR(fme_region);
@@ -423,30 +421,30 @@ static int pr_mgmt_init(struct platform_device *pdev,
list_add(&fme_region->node, &priv->region_list);
}
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return 0;
destroy_region:
- dfl_fme_destroy_regions(pdata);
- dfl_fme_destroy_bridges(pdata);
- dfl_fme_destroy_mgr(pdata);
+ dfl_fme_destroy_regions(fdata);
+ dfl_fme_destroy_bridges(fdata);
+ dfl_fme_destroy_mgr(fdata);
unlock:
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return ret;
}
static void pr_mgmt_uinit(struct platform_device *pdev,
struct dfl_feature *feature)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
- dfl_fme_destroy_regions(pdata);
- dfl_fme_destroy_bridges(pdata);
- dfl_fme_destroy_mgr(pdata);
- mutex_unlock(&pdata->lock);
+ dfl_fme_destroy_regions(fdata);
+ dfl_fme_destroy_bridges(fdata);
+ dfl_fme_destroy_mgr(fdata);
+ mutex_unlock(&fdata->lock);
}
static long fme_pr_ioctl(struct platform_device *pdev,
diff --git a/drivers/fpga/dfl-fme-pr.h b/drivers/fpga/dfl-fme-pr.h
index 096a699089d3..761f80f63312 100644
--- a/drivers/fpga/dfl-fme-pr.h
+++ b/drivers/fpga/dfl-fme-pr.h
@@ -58,7 +58,7 @@ struct dfl_fme_bridge {
};
/**
- * struct dfl_fme_bridge_pdata - platform data for FME bridge platform device.
+ * struct dfl_fme_br_pdata - platform data for FME bridge platform device.
*
* @cdev: container device.
* @port_id: port id.
diff --git a/drivers/fpga/dfl-fme-region.c b/drivers/fpga/dfl-fme-region.c
index 4aebde0a7f1c..c6cd63063c82 100644
--- a/drivers/fpga/dfl-fme-region.c
+++ b/drivers/fpga/dfl-fme-region.c
@@ -61,23 +61,21 @@ eprobe_mgr_put:
return ret;
}
-static int fme_region_remove(struct platform_device *pdev)
+static void fme_region_remove(struct platform_device *pdev)
{
struct fpga_region *region = platform_get_drvdata(pdev);
struct fpga_manager *mgr = region->mgr;
fpga_region_unregister(region);
fpga_mgr_put(mgr);
-
- return 0;
}
static struct platform_driver fme_region_driver = {
- .driver = {
- .name = DFL_FPGA_FME_REGION,
+ .driver = {
+ .name = DFL_FPGA_FME_REGION,
},
- .probe = fme_region_probe,
- .remove = fme_region_remove,
+ .probe = fme_region_probe,
+ .remove = fme_region_remove,
};
module_platform_driver(fme_region_driver);
diff --git a/drivers/fpga/dfl-fme.h b/drivers/fpga/dfl-fme.h
index 4195dd68193e..a566dbc2b485 100644
--- a/drivers/fpga/dfl-fme.h
+++ b/drivers/fpga/dfl-fme.h
@@ -24,13 +24,11 @@
* @mgr: FME's FPGA manager platform device.
* @region_list: linked list of FME's FPGA regions.
* @bridge_list: linked list of FME's FPGA bridges.
- * @pdata: fme platform device's pdata.
*/
struct dfl_fme {
struct platform_device *mgr;
struct list_head region_list;
struct list_head bridge_list;
- struct dfl_feature_platform_data *pdata;
};
extern const struct dfl_feature_ops fme_pr_mgmt_ops;
diff --git a/drivers/fpga/dfl-pci.c b/drivers/fpga/dfl-pci.c
index 0914e7328b1a..602807d6afcc 100644
--- a/drivers/fpga/dfl-pci.c
+++ b/drivers/fpga/dfl-pci.c
@@ -21,7 +21,6 @@
#include <linux/module.h>
#include <linux/stddef.h>
#include <linux/errno.h>
-#include <linux/aer.h>
#include "dfl.h"
@@ -40,14 +39,6 @@ struct cci_drvdata {
struct dfl_fpga_cdev *cdev; /* container device */
};
-static void __iomem *cci_pci_ioremap_bar0(struct pci_dev *pcidev)
-{
- if (pcim_iomap_regions(pcidev, BIT(0), DRV_NAME))
- return NULL;
-
- return pcim_iomap_table(pcidev)[0];
-}
-
static int cci_pci_alloc_irq(struct pci_dev *pcidev)
{
int ret, nvec = pci_msix_vec_count(pcidev);
@@ -79,6 +70,7 @@ static void cci_pci_free_irq(struct pci_dev *pcidev)
#define PCIE_DEVICE_ID_SILICOM_PAC_N5011 0x1001
#define PCIE_DEVICE_ID_INTEL_DFL 0xbcce
/* PCI Subdevice ID for PCIE_DEVICE_ID_INTEL_DFL */
+#define PCIE_SUBDEVICE_ID_INTEL_D5005 0x138d
#define PCIE_SUBDEVICE_ID_INTEL_N6000 0x1770
#define PCIE_SUBDEVICE_ID_INTEL_N6001 0x1771
#define PCIE_SUBDEVICE_ID_INTEL_C6100 0x17d4
@@ -103,6 +95,8 @@ static struct pci_device_id cci_pcie_id_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_SILICOM_DENMARK, PCIE_DEVICE_ID_SILICOM_PAC_N5010),},
{PCI_DEVICE(PCI_VENDOR_ID_SILICOM_DENMARK, PCIE_DEVICE_ID_SILICOM_PAC_N5011),},
{PCI_DEVICE_SUB(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_DFL,
+ PCI_VENDOR_ID_INTEL, PCIE_SUBDEVICE_ID_INTEL_D5005),},
+ {PCI_DEVICE_SUB(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_DFL,
PCI_VENDOR_ID_INTEL, PCIE_SUBDEVICE_ID_INTEL_N6000),},
{PCI_DEVICE_SUB(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_DFL_VF,
PCI_VENDOR_ID_INTEL, PCIE_SUBDEVICE_ID_INTEL_N6000),},
@@ -157,19 +151,12 @@ static int *cci_pci_create_irq_table(struct pci_dev *pcidev, unsigned int nvec)
static int find_dfls_by_vsec(struct pci_dev *pcidev, struct dfl_fpga_enum_info *info)
{
- u32 bir, offset, vndr_hdr, dfl_cnt, dfl_res;
- int dfl_res_off, i, bars, voff = 0;
+ u32 bir, offset, dfl_cnt, dfl_res;
+ int dfl_res_off, i, bars, voff;
resource_size_t start, len;
- while ((voff = pci_find_next_ext_capability(pcidev, voff, PCI_EXT_CAP_ID_VNDR))) {
- vndr_hdr = 0;
- pci_read_config_dword(pcidev, voff + PCI_VNDR_HEADER, &vndr_hdr);
-
- if (PCI_VNDR_HEADER_ID(vndr_hdr) == PCI_VSEC_ID_INTEL_DFLS &&
- pcidev->vendor == PCI_VENDOR_ID_INTEL)
- break;
- }
-
+ voff = pci_find_vsec_capability(pcidev, PCI_VENDOR_ID_INTEL,
+ PCI_VSEC_ID_INTEL_DFLS);
if (!voff) {
dev_dbg(&pcidev->dev, "%s no DFL VSEC found\n", __func__);
return -ENODEV;
@@ -240,9 +227,9 @@ static int find_dfls_by_default(struct pci_dev *pcidev,
u64 v;
/* start to find Device Feature List from Bar 0 */
- base = cci_pci_ioremap_bar0(pcidev);
- if (!base)
- return -ENOMEM;
+ base = pcim_iomap_region(pcidev, 0, DRV_NAME);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
/*
* PF device has FME and Ports/AFUs, and VF device only has one
@@ -301,7 +288,7 @@ static int find_dfls_by_default(struct pci_dev *pcidev,
}
/* release I/O mappings for next step enumeration */
- pcim_iounmap_regions(pcidev, BIT(0));
+ pcim_iounmap_region(pcidev, 0);
return ret;
}
@@ -376,10 +363,6 @@ int cci_pci_probe(struct pci_dev *pcidev, const struct pci_device_id *pcidevid)
return ret;
}
- ret = pci_enable_pcie_error_reporting(pcidev);
- if (ret && ret != -EINVAL)
- dev_info(&pcidev->dev, "PCIE AER unavailable %d.\n", ret);
-
pci_set_master(pcidev);
ret = dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(64));
@@ -387,24 +370,22 @@ int cci_pci_probe(struct pci_dev *pcidev, const struct pci_device_id *pcidevid)
ret = dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(32));
if (ret) {
dev_err(&pcidev->dev, "No suitable DMA support available.\n");
- goto disable_error_report_exit;
+ return ret;
}
ret = cci_init_drvdata(pcidev);
if (ret) {
dev_err(&pcidev->dev, "Fail to init drvdata %d.\n", ret);
- goto disable_error_report_exit;
+ return ret;
}
ret = cci_enumerate_feature_devs(pcidev);
- if (!ret)
+ if (ret) {
+ dev_err(&pcidev->dev, "enumeration failure %d.\n", ret);
return ret;
+ }
- dev_err(&pcidev->dev, "enumeration failure %d.\n", ret);
-
-disable_error_report_exit:
- pci_disable_pcie_error_reporting(pcidev);
- return ret;
+ return 0;
}
static int cci_pci_sriov_configure(struct pci_dev *pcidev, int num_vfs)
@@ -448,7 +429,6 @@ static void cci_pci_remove(struct pci_dev *pcidev)
cci_pci_sriov_configure(pcidev, 0);
cci_remove_feature_devs(pcidev);
- pci_disable_pcie_error_reporting(pcidev);
}
static struct pci_driver cci_pci_driver = {
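
Two API swaps stand out in the dfl-pci.c hunks: the open-coded walk over vendor-specific extended capabilities becomes a single pci_find_vsec_capability() call, and BAR 0 is mapped with pcim_iomap_region(), which returns an ERR_PTR-encoded pointer rather than NULL on failure. A condensed, illustration-only sketch of that sequence:

static int example_find_and_map_dfl(struct pci_dev *pcidev)
{
        void __iomem *base;
        int voff;

        /* matches both the vendor ID and the DFLS VSEC ID in one call */
        voff = pci_find_vsec_capability(pcidev, PCI_VENDOR_ID_INTEL,
                                        PCI_VSEC_ID_INTEL_DFLS);
        if (!voff)
                return -ENODEV;

        base = pcim_iomap_region(pcidev, 0, DRV_NAME);
        if (IS_ERR(base))
                return PTR_ERR(base);

        /* ... parse DFL registers relative to 'voff' and 'base' ... */

        pcim_iounmap_region(pcidev, 0);
        return 0;
}
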
diff --git a/drivers/fpga/dfl.c b/drivers/fpga/dfl.c
index b9aae85ba930..7022657243c0 100644
--- a/drivers/fpga/dfl.c
+++ b/drivers/fpga/dfl.c
@@ -13,6 +13,7 @@
#include <linux/dfl.h>
#include <linux/fpga-dfl.h>
#include <linux/module.h>
+#include <linux/overflow.h>
#include <linux/uaccess.h>
#include "dfl.h"
@@ -45,7 +46,7 @@ static const char *dfl_pdata_key_strings[DFL_ID_MAX] = {
};
/**
- * dfl_dev_info - dfl feature device information.
+ * struct dfl_dev_info - dfl feature device information.
* @name: name string of the feature platform device.
* @dfh_id: id value in Device Feature Header (DFH) register by DFL spec.
* @id: idr id of the feature dev.
@@ -67,7 +68,7 @@ static struct dfl_dev_info dfl_devs[] = {
};
/**
- * dfl_chardev_info - chardev information of dfl feature device
+ * struct dfl_chardev_info - chardev information of dfl feature device
 * @name: name string of the char device.
* @devt: devt of the char device.
*/
@@ -118,17 +119,6 @@ static void dfl_id_free(enum dfl_id_type type, int id)
mutex_unlock(&dfl_id_mutex);
}
-static enum dfl_id_type feature_dev_id_type(struct platform_device *pdev)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(dfl_devs); i++)
- if (!strcmp(dfl_devs[i].name, pdev->name))
- return i;
-
- return DFL_ID_MAX;
-}
-
static enum dfl_id_type dfh_id_to_type(u16 id)
{
int i;
@@ -155,12 +145,12 @@ static LIST_HEAD(dfl_port_ops_list);
/**
* dfl_fpga_port_ops_get - get matched port ops from the global list
- * @pdev: platform device to match with associated port ops.
+ * @fdata: feature dev data to match with associated port ops.
* Return: matched port ops on success, NULL otherwise.
*
 * Please note that dfl_fpga_port_ops_put() must be called after using the port_ops.
*/
-struct dfl_fpga_port_ops *dfl_fpga_port_ops_get(struct platform_device *pdev)
+struct dfl_fpga_port_ops *dfl_fpga_port_ops_get(struct dfl_feature_dev_data *fdata)
{
struct dfl_fpga_port_ops *ops = NULL;
@@ -170,7 +160,7 @@ struct dfl_fpga_port_ops *dfl_fpga_port_ops_get(struct platform_device *pdev)
list_for_each_entry(ops, &dfl_port_ops_list, node) {
/* match port_ops using the name of platform device */
- if (!strcmp(pdev->name, ops->name)) {
+ if (!strcmp(fdata->pdev_name, ops->name)) {
if (!try_module_get(ops->owner))
ops = NULL;
goto done;
@@ -221,27 +211,26 @@ EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_del);
/**
* dfl_fpga_check_port_id - check the port id
- * @pdev: port platform device.
+ * @fdata: port feature dev data.
* @pport_id: port id to compare.
*
* Return: 1 if port device matches with given port id, otherwise 0.
*/
-int dfl_fpga_check_port_id(struct platform_device *pdev, void *pport_id)
+int dfl_fpga_check_port_id(struct dfl_feature_dev_data *fdata, void *pport_id)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct dfl_fpga_port_ops *port_ops;
- if (pdata->id != FEATURE_DEV_ID_UNUSED)
- return pdata->id == *(int *)pport_id;
+ if (fdata->id != FEATURE_DEV_ID_UNUSED)
+ return fdata->id == *(int *)pport_id;
- port_ops = dfl_fpga_port_ops_get(pdev);
+ port_ops = dfl_fpga_port_ops_get(fdata);
if (!port_ops || !port_ops->get_id)
return 0;
- pdata->id = port_ops->get_id(pdev);
+ fdata->id = port_ops->get_id(fdata);
dfl_fpga_port_ops_put(port_ops);
- return pdata->id == *(int *)pport_id;
+ return fdata->id == *(int *)pport_id;
}
EXPORT_SYMBOL_GPL(dfl_fpga_check_port_id);
@@ -256,10 +245,10 @@ dfl_match_one_device(const struct dfl_device_id *id, struct dfl_device *ddev)
return NULL;
}
-static int dfl_bus_match(struct device *dev, struct device_driver *drv)
+static int dfl_bus_match(struct device *dev, const struct device_driver *drv)
{
struct dfl_device *ddev = to_dfl_dev(dev);
- struct dfl_driver *ddrv = to_dfl_drv(drv);
+ const struct dfl_driver *ddrv = to_dfl_drv(drv);
const struct dfl_device_id *id_entry;
id_entry = ddrv->id_table;
@@ -293,9 +282,9 @@ static void dfl_bus_remove(struct device *dev)
ddrv->remove(ddev);
}
-static int dfl_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int dfl_bus_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
- struct dfl_device *ddev = to_dfl_dev(dev);
+ const struct dfl_device *ddev = to_dfl_dev(dev);
return add_uevent_var(env, "MODALIAS=dfl:t%04Xf%04X",
ddev->type, ddev->feature_id);
@@ -326,7 +315,7 @@ static struct attribute *dfl_dev_attrs[] = {
};
ATTRIBUTE_GROUPS(dfl_dev);
-static struct bus_type dfl_bus_type = {
+static const struct bus_type dfl_bus_type = {
.name = "dfl",
.match = dfl_bus_match,
.probe = dfl_bus_probe,
@@ -342,16 +331,18 @@ static void release_dfl_dev(struct device *dev)
if (ddev->mmio_res.parent)
release_resource(&ddev->mmio_res);
+ kfree(ddev->params);
+
ida_free(&dfl_device_ida, ddev->id);
kfree(ddev->irqs);
kfree(ddev);
}
static struct dfl_device *
-dfl_dev_add(struct dfl_feature_platform_data *pdata,
+dfl_dev_add(struct dfl_feature_dev_data *fdata,
struct dfl_feature *feature)
{
- struct platform_device *pdev = pdata->dev;
+ struct platform_device *pdev = fdata->dev;
struct resource *parent_res;
struct dfl_device *ddev;
int id, i, ret;
@@ -377,10 +368,19 @@ dfl_dev_add(struct dfl_feature_platform_data *pdata,
if (ret)
goto put_dev;
- ddev->type = feature_dev_id_type(pdev);
+ ddev->type = fdata->type;
ddev->feature_id = feature->id;
ddev->revision = feature->revision;
- ddev->cdev = pdata->dfl_cdev;
+ ddev->dfh_version = feature->dfh_version;
+ ddev->cdev = fdata->dfl_cdev;
+ if (feature->param_size) {
+ ddev->params = kmemdup(feature->params, feature->param_size, GFP_KERNEL);
+ if (!ddev->params) {
+ ret = -ENOMEM;
+ goto put_dev;
+ }
+ ddev->param_size = feature->param_size;
+ }
/* add mmio resource */
parent_res = &pdev->resource[feature->resource_index];
@@ -423,11 +423,11 @@ put_dev:
return ERR_PTR(ret);
}
-static void dfl_devs_remove(struct dfl_feature_platform_data *pdata)
+static void dfl_devs_remove(struct dfl_feature_dev_data *fdata)
{
struct dfl_feature *feature;
- dfl_fpga_dev_for_each_feature(pdata, feature) {
+ dfl_fpga_dev_for_each_feature(fdata, feature) {
if (feature->ddev) {
device_unregister(&feature->ddev->dev);
feature->ddev = NULL;
@@ -435,13 +435,13 @@ static void dfl_devs_remove(struct dfl_feature_platform_data *pdata)
}
}
-static int dfl_devs_add(struct dfl_feature_platform_data *pdata)
+static int dfl_devs_add(struct dfl_feature_dev_data *fdata)
{
struct dfl_feature *feature;
struct dfl_device *ddev;
int ret;
- dfl_fpga_dev_for_each_feature(pdata, feature) {
+ dfl_fpga_dev_for_each_feature(fdata, feature) {
if (feature->ioaddr)
continue;
@@ -450,7 +450,7 @@ static int dfl_devs_add(struct dfl_feature_platform_data *pdata)
goto err;
}
- ddev = dfl_dev_add(pdata, feature);
+ ddev = dfl_dev_add(fdata, feature);
if (IS_ERR(ddev)) {
ret = PTR_ERR(ddev);
goto err;
@@ -462,7 +462,7 @@ static int dfl_devs_add(struct dfl_feature_platform_data *pdata)
return 0;
err:
- dfl_devs_remove(pdata);
+ dfl_devs_remove(fdata);
return ret;
}
@@ -492,12 +492,12 @@ EXPORT_SYMBOL(dfl_driver_unregister);
*/
void dfl_fpga_dev_feature_uinit(struct platform_device *pdev)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
struct dfl_feature *feature;
- dfl_devs_remove(pdata);
+ dfl_devs_remove(fdata);
- dfl_fpga_dev_for_each_feature(pdata, feature) {
+ dfl_fpga_dev_for_each_feature(fdata, feature) {
if (feature->ops) {
if (feature->ops->uinit)
feature->ops->uinit(pdev, feature);
@@ -508,7 +508,6 @@ void dfl_fpga_dev_feature_uinit(struct platform_device *pdev)
EXPORT_SYMBOL_GPL(dfl_fpga_dev_feature_uinit);
static int dfl_feature_instance_init(struct platform_device *pdev,
- struct dfl_feature_platform_data *pdata,
struct dfl_feature *feature,
struct dfl_feature_driver *drv)
{
@@ -567,16 +566,15 @@ static bool dfl_feature_drv_match(struct dfl_feature *feature,
int dfl_fpga_dev_feature_init(struct platform_device *pdev,
struct dfl_feature_driver *feature_drvs)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
struct dfl_feature_driver *drv = feature_drvs;
struct dfl_feature *feature;
int ret;
while (drv->ops) {
- dfl_fpga_dev_for_each_feature(pdata, feature) {
+ dfl_fpga_dev_for_each_feature(fdata, feature) {
if (dfl_feature_drv_match(feature, drv)) {
- ret = dfl_feature_instance_init(pdev, pdata,
- feature, drv);
+ ret = dfl_feature_instance_init(pdev, feature, drv);
if (ret)
goto exit;
}
@@ -584,7 +582,7 @@ int dfl_fpga_dev_feature_init(struct platform_device *pdev,
drv++;
}
- ret = dfl_devs_add(pdata);
+ ret = dfl_devs_add(fdata);
if (ret)
goto exit;
@@ -683,7 +681,7 @@ EXPORT_SYMBOL_GPL(dfl_fpga_dev_ops_unregister);
* @nr_irqs: number of irqs for all feature devices.
* @irq_table: Linux IRQ numbers for all irqs, indexed by local irq index of
* this device.
- * @feature_dev: current feature device.
+ * @type: the current FIU type.
* @ioaddr: header register region address of current FIU in enumeration.
* @start: register resource start of current FIU.
* @len: max register resource length of current FIU.
@@ -696,7 +694,7 @@ struct build_feature_devs_info {
unsigned int nr_irqs;
int *irq_table;
- struct platform_device *feature_dev;
+ enum dfl_id_type type;
void __iomem *ioaddr;
resource_size_t start;
resource_size_t len;
@@ -708,66 +706,85 @@ struct build_feature_devs_info {
* struct dfl_feature_info - sub feature info collected during feature dev build
*
* @fid: id of this sub feature.
+ * @revision: revision of this sub feature.
+ * @dfh_version: version of Device Feature Header (DFH).
* @mmio_res: mmio resource of this sub feature.
* @ioaddr: mapped base address of mmio resource.
* @node: node in sub_features linked list.
* @irq_base: start of irq index in this sub feature.
* @nr_irqs: number of irqs of this sub feature.
+ * @param_size: size of DFH parameters.
+ * @params: DFH parameter data.
*/
struct dfl_feature_info {
u16 fid;
u8 revision;
+ u8 dfh_version;
struct resource mmio_res;
void __iomem *ioaddr;
struct list_head node;
unsigned int irq_base;
unsigned int nr_irqs;
+ unsigned int param_size;
+ u64 params[];
};
-static void dfl_fpga_cdev_add_port_dev(struct dfl_fpga_cdev *cdev,
- struct platform_device *port)
+static void dfl_fpga_cdev_add_port_data(struct dfl_fpga_cdev *cdev,
+ struct dfl_feature_dev_data *fdata)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&port->dev);
-
mutex_lock(&cdev->lock);
- list_add(&pdata->node, &cdev->port_dev_list);
- get_device(&pdata->dev->dev);
+ list_add(&fdata->node, &cdev->port_dev_list);
mutex_unlock(&cdev->lock);
}
-/*
- * register current feature device, it is called when we need to switch to
- * another feature parsing or we have parsed all features on given device
- * feature list.
- */
-static int build_info_commit_dev(struct build_feature_devs_info *binfo)
+static void dfl_id_free_action(void *arg)
+{
+ struct dfl_feature_dev_data *fdata = arg;
+
+ dfl_id_free(fdata->type, fdata->pdev_id);
+}
+
+static struct dfl_feature_dev_data *
+binfo_create_feature_dev_data(struct build_feature_devs_info *binfo)
{
- struct platform_device *fdev = binfo->feature_dev;
- struct dfl_feature_platform_data *pdata;
+ enum dfl_id_type type = binfo->type;
struct dfl_feature_info *finfo, *p;
- enum dfl_id_type type;
+ struct dfl_feature_dev_data *fdata;
int ret, index = 0, res_idx = 0;
- type = feature_dev_id_type(fdev);
if (WARN_ON_ONCE(type >= DFL_ID_MAX))
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
- /*
- * we do not need to care for the memory which is associated with
- * the platform device. After calling platform_device_unregister(),
- * it will be automatically freed by device's release() callback,
- * platform_device_release().
- */
- pdata = kzalloc(struct_size(pdata, features, binfo->feature_num), GFP_KERNEL);
- if (!pdata)
- return -ENOMEM;
+ fdata = devm_kzalloc(binfo->dev, sizeof(*fdata), GFP_KERNEL);
+ if (!fdata)
+ return ERR_PTR(-ENOMEM);
+
+ fdata->features = devm_kcalloc(binfo->dev, binfo->feature_num,
+ sizeof(*fdata->features), GFP_KERNEL);
+ if (!fdata->features)
+ return ERR_PTR(-ENOMEM);
+
+ fdata->resources = devm_kcalloc(binfo->dev, binfo->feature_num,
+ sizeof(*fdata->resources), GFP_KERNEL);
+ if (!fdata->resources)
+ return ERR_PTR(-ENOMEM);
- pdata->dev = fdev;
- pdata->num = binfo->feature_num;
- pdata->dfl_cdev = binfo->cdev;
- pdata->id = FEATURE_DEV_ID_UNUSED;
- mutex_init(&pdata->lock);
- lockdep_set_class_and_name(&pdata->lock, &dfl_pdata_keys[type],
+ fdata->type = type;
+
+ fdata->pdev_id = dfl_id_alloc(type, binfo->dev);
+ if (fdata->pdev_id < 0)
+ return ERR_PTR(fdata->pdev_id);
+
+ ret = devm_add_action_or_reset(binfo->dev, dfl_id_free_action, fdata);
+ if (ret)
+ return ERR_PTR(ret);
+
+ fdata->pdev_name = dfl_devs[type].name;
+ fdata->num = binfo->feature_num;
+ fdata->dfl_cdev = binfo->cdev;
+ fdata->id = FEATURE_DEV_ID_UNUSED;
+ mutex_init(&fdata->lock);
+ lockdep_set_class_and_name(&fdata->lock, &dfl_pdata_keys[type],
dfl_pdata_key_strings[type]);
/*
@@ -776,28 +793,28 @@ static int build_info_commit_dev(struct build_feature_devs_info *binfo)
* works properly for port device.
* and it should always be 0 for fme device.
*/
- WARN_ON(pdata->disable_count);
-
- fdev->dev.platform_data = pdata;
-
- /* each sub feature has one MMIO resource */
- fdev->num_resources = binfo->feature_num;
- fdev->resource = kcalloc(binfo->feature_num, sizeof(*fdev->resource),
- GFP_KERNEL);
- if (!fdev->resource)
- return -ENOMEM;
+ WARN_ON(fdata->disable_count);
/* fill features and resource information for feature dev */
list_for_each_entry_safe(finfo, p, &binfo->sub_features, node) {
- struct dfl_feature *feature = &pdata->features[index++];
+ struct dfl_feature *feature = &fdata->features[index++];
struct dfl_feature_irq_ctx *ctx;
unsigned int i;
/* save resource information for each feature */
- feature->dev = fdev;
feature->id = finfo->fid;
feature->revision = finfo->revision;
+ feature->dfh_version = finfo->dfh_version;
+
+ if (finfo->param_size) {
+ feature->params = devm_kmemdup(binfo->dev,
+ finfo->params, finfo->param_size,
+ GFP_KERNEL);
+ if (!feature->params)
+ return ERR_PTR(-ENOMEM);
+ feature->param_size = finfo->param_size;
+ }
/*
* the FIU header feature has some fundamental functions (sriov
* set, port enable/disable) needed for the dfl bus device and
@@ -811,17 +828,17 @@ static int build_info_commit_dev(struct build_feature_devs_info *binfo)
devm_ioremap_resource(binfo->dev,
&finfo->mmio_res);
if (IS_ERR(feature->ioaddr))
- return PTR_ERR(feature->ioaddr);
+ return ERR_CAST(feature->ioaddr);
} else {
feature->resource_index = res_idx;
- fdev->resource[res_idx++] = finfo->mmio_res;
+ fdata->resources[res_idx++] = finfo->mmio_res;
}
if (finfo->nr_irqs) {
ctx = devm_kcalloc(binfo->dev, finfo->nr_irqs,
sizeof(*ctx), GFP_KERNEL);
if (!ctx)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
for (i = 0; i < finfo->nr_irqs; i++)
ctx[i].irq =
@@ -835,55 +852,94 @@ static int build_info_commit_dev(struct build_feature_devs_info *binfo)
kfree(finfo);
}
- ret = platform_device_add(binfo->feature_dev);
- if (!ret) {
- if (type == PORT_ID)
- dfl_fpga_cdev_add_port_dev(binfo->cdev,
- binfo->feature_dev);
- else
- binfo->cdev->fme_dev =
- get_device(&binfo->feature_dev->dev);
- /*
- * reset it to avoid build_info_free() freeing their resource.
- *
- * The resource of successfully registered feature devices
- * will be freed by platform_device_unregister(). See the
- * comments in build_info_create_dev().
- */
- binfo->feature_dev = NULL;
- }
+ fdata->resource_num = res_idx;
- return ret;
+ return fdata;
}
-static int
-build_info_create_dev(struct build_feature_devs_info *binfo,
- enum dfl_id_type type)
+/*
+ * Register the current feature device. This is called when we need to switch
+ * to parsing another feature, or when all features on the given device
+ * feature list have been parsed.
+ */
+static int feature_dev_register(struct dfl_feature_dev_data *fdata)
{
+ struct dfl_feature_platform_data pdata = {};
struct platform_device *fdev;
+ struct dfl_feature *feature;
+ int ret;
- if (type >= DFL_ID_MAX)
- return -EINVAL;
-
- /*
- * we use -ENODEV as the initialization indicator which indicates
- * whether the id need to be reclaimed
- */
- fdev = platform_device_alloc(dfl_devs[type].name, -ENODEV);
+ fdev = platform_device_alloc(fdata->pdev_name, fdata->pdev_id);
if (!fdev)
return -ENOMEM;
- binfo->feature_dev = fdev;
- binfo->feature_num = 0;
+ fdata->dev = fdev;
- INIT_LIST_HEAD(&binfo->sub_features);
+ fdev->dev.parent = &fdata->dfl_cdev->region->dev;
+ fdev->dev.devt = dfl_get_devt(dfl_devs[fdata->type].devt_type, fdev->id);
+
+ dfl_fpga_dev_for_each_feature(fdata, feature)
+ feature->dev = fdev;
+
+ ret = platform_device_add_resources(fdev, fdata->resources,
+ fdata->resource_num);
+ if (ret)
+ goto err_put_dev;
- fdev->id = dfl_id_alloc(type, &fdev->dev);
- if (fdev->id < 0)
- return fdev->id;
+ pdata.fdata = fdata;
+ ret = platform_device_add_data(fdev, &pdata, sizeof(pdata));
+ if (ret)
+ goto err_put_dev;
- fdev->dev.parent = &binfo->cdev->region->dev;
- fdev->dev.devt = dfl_get_devt(dfl_devs[type].devt_type, fdev->id);
+ ret = platform_device_add(fdev);
+ if (ret)
+ goto err_put_dev;
+
+ return 0;
+
+err_put_dev:
+ platform_device_put(fdev);
+
+ fdata->dev = NULL;
+
+ dfl_fpga_dev_for_each_feature(fdata, feature)
+ feature->dev = NULL;
+
+ return ret;
+}
+
+static void feature_dev_unregister(struct dfl_feature_dev_data *fdata)
+{
+ struct dfl_feature *feature;
+
+ platform_device_unregister(fdata->dev);
+
+ fdata->dev = NULL;
+
+ dfl_fpga_dev_for_each_feature(fdata, feature)
+ feature->dev = NULL;
+}
+
+static int build_info_commit_dev(struct build_feature_devs_info *binfo)
+{
+ struct dfl_feature_dev_data *fdata;
+ int ret;
+
+ fdata = binfo_create_feature_dev_data(binfo);
+ if (IS_ERR(fdata))
+ return PTR_ERR(fdata);
+
+ ret = feature_dev_register(fdata);
+ if (ret)
+ return ret;
+
+ if (binfo->type == PORT_ID)
+ dfl_fpga_cdev_add_port_data(binfo->cdev, fdata);
+ else
+ binfo->cdev->fme_dev = get_device(&fdata->dev->dev);
+
+ /* reset the binfo for next FIU */
+ binfo->type = DFL_ID_MAX;
return 0;
}
@@ -892,22 +948,11 @@ static void build_info_free(struct build_feature_devs_info *binfo)
{
struct dfl_feature_info *finfo, *p;
- /*
- * it is a valid id, free it. See comments in
- * build_info_create_dev()
- */
- if (binfo->feature_dev && binfo->feature_dev->id >= 0) {
- dfl_id_free(feature_dev_id_type(binfo->feature_dev),
- binfo->feature_dev->id);
-
- list_for_each_entry_safe(finfo, p, &binfo->sub_features, node) {
- list_del(&finfo->node);
- kfree(finfo);
- }
+ list_for_each_entry_safe(finfo, p, &binfo->sub_features, node) {
+ list_del(&finfo->node);
+ kfree(finfo);
}
- platform_device_put(binfo->feature_dev);
-
devm_kfree(binfo->dev, binfo);
}
@@ -934,56 +979,115 @@ static u16 feature_id(u64 value)
return 0;
}
+static u64 *find_param(u64 *params, resource_size_t max, int param_id)
+{
+ u64 *end = params + max / sizeof(u64);
+ u64 v, next;
+
+ while (params < end) {
+ v = *params;
+ if (param_id == FIELD_GET(DFHv1_PARAM_HDR_ID, v))
+ return params;
+
+ if (FIELD_GET(DFHv1_PARAM_HDR_NEXT_EOP, v))
+ break;
+
+ next = FIELD_GET(DFHv1_PARAM_HDR_NEXT_OFFSET, v);
+ params += next;
+ }
+
+ return NULL;
+}
+
+/**
+ * dfh_find_param() - find parameter block for the given parameter id
+ * @dfl_dev: dfl device
+ * @param_id: id of dfl parameter
+ * @psize: destination to store size of parameter data in bytes
+ *
+ * Return: pointer to the start of the parameter data, or ERR_PTR() on failure.
+ */
+void *dfh_find_param(struct dfl_device *dfl_dev, int param_id, size_t *psize)
+{
+ u64 *phdr = find_param(dfl_dev->params, dfl_dev->param_size, param_id);
+
+ if (!phdr)
+ return ERR_PTR(-ENOENT);
+
+ if (psize)
+ *psize = (FIELD_GET(DFHv1_PARAM_HDR_NEXT_OFFSET, *phdr) - 1) * sizeof(u64);
+
+ return phdr + 1;
+}
+EXPORT_SYMBOL_GPL(dfh_find_param);
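For orientation, a minimal consumer sketch of dfh_find_param() might look as follows; the probe function, driver name and printout are illustrative and not part of this patch, and the MSI-X parameter id is the one added to dfl.h later in this series:

#include <linux/err.h>
#include "dfl.h"	/* assumes the example lives under drivers/fpga */

static int example_dfl_probe(struct dfl_device *dfl_dev)
{
	size_t psize;
	u64 *p;

	/* Look up the DFHv1 MSI-X parameter block (DFHv1_PARAM_ID_MSI_X). */
	p = dfh_find_param(dfl_dev, DFHv1_PARAM_ID_MSI_X, &psize);
	if (IS_ERR(p))
		return PTR_ERR(p);	/* -ENOENT when the block is absent */

	dev_info(&dfl_dev->dev, "MSI-X parameter block: %zu bytes\n", psize);
	return 0;
}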
+
static int parse_feature_irqs(struct build_feature_devs_info *binfo,
- resource_size_t ofst, u16 fid,
- unsigned int *irq_base, unsigned int *nr_irqs)
+ resource_size_t ofst, struct dfl_feature_info *finfo)
{
void __iomem *base = binfo->ioaddr + ofst;
unsigned int i, ibase, inr = 0;
+ void *params = finfo->params;
enum dfl_id_type type;
+ u16 fid = finfo->fid;
int virq;
+ u64 *p;
u64 v;
- type = feature_dev_id_type(binfo->feature_dev);
+ switch (finfo->dfh_version) {
+ case 0:
+ /*
+ * DFHv0 only provides MMIO resource information for each feature
+ * in the DFL header. There is no generic interrupt information.
+ * Instead, features with interrupt functionality provide
+ * the information in feature specific registers.
+ */
+ type = binfo->type;
+ if (type == PORT_ID) {
+ switch (fid) {
+ case PORT_FEATURE_ID_UINT:
+ v = readq(base + PORT_UINT_CAP);
+ ibase = FIELD_GET(PORT_UINT_CAP_FST_VECT, v);
+ inr = FIELD_GET(PORT_UINT_CAP_INT_NUM, v);
+ break;
+ case PORT_FEATURE_ID_ERROR:
+ v = readq(base + PORT_ERROR_CAP);
+ ibase = FIELD_GET(PORT_ERROR_CAP_INT_VECT, v);
+ inr = FIELD_GET(PORT_ERROR_CAP_SUPP_INT, v);
+ break;
+ }
+ } else if (type == FME_ID) {
+ switch (fid) {
+ case FME_FEATURE_ID_GLOBAL_ERR:
+ v = readq(base + FME_ERROR_CAP);
+ ibase = FIELD_GET(FME_ERROR_CAP_INT_VECT, v);
+ inr = FIELD_GET(FME_ERROR_CAP_SUPP_INT, v);
+ break;
+ }
+ }
+ break;
- /*
- * Ideally DFL framework should only read info from DFL header, but
- * current version DFL only provides mmio resources information for
- * each feature in DFL Header, no field for interrupt resources.
- * Interrupt resource information is provided by specific mmio
- * registers of each private feature which supports interrupt. So in
- * order to parse and assign irq resources, DFL framework has to look
- * into specific capability registers of these private features.
- *
- * Once future DFL version supports generic interrupt resource
- * information in common DFL headers, the generic interrupt parsing
- * code will be added. But in order to be compatible to old version
- * DFL, the driver may still fall back to these quirks.
- */
- if (type == PORT_ID) {
- switch (fid) {
- case PORT_FEATURE_ID_UINT:
- v = readq(base + PORT_UINT_CAP);
- ibase = FIELD_GET(PORT_UINT_CAP_FST_VECT, v);
- inr = FIELD_GET(PORT_UINT_CAP_INT_NUM, v);
- break;
- case PORT_FEATURE_ID_ERROR:
- v = readq(base + PORT_ERROR_CAP);
- ibase = FIELD_GET(PORT_ERROR_CAP_INT_VECT, v);
- inr = FIELD_GET(PORT_ERROR_CAP_SUPP_INT, v);
+ case 1:
+ /*
+ * DFHv1 provides interrupt resource information in DFHv1
+ * parameter blocks.
+ */
+ p = find_param(params, finfo->param_size, DFHv1_PARAM_ID_MSI_X);
+ if (!p)
break;
- }
- } else if (type == FME_ID) {
- if (fid == FME_FEATURE_ID_GLOBAL_ERR) {
- v = readq(base + FME_ERROR_CAP);
- ibase = FIELD_GET(FME_ERROR_CAP_INT_VECT, v);
- inr = FIELD_GET(FME_ERROR_CAP_SUPP_INT, v);
- }
+
+ p++;
+ ibase = FIELD_GET(DFHv1_PARAM_MSI_X_STARTV, *p);
+ inr = FIELD_GET(DFHv1_PARAM_MSI_X_NUMV, *p);
+ break;
+
+ default:
+ dev_warn(binfo->dev, "unexpected DFH version %d\n", finfo->dfh_version);
+ break;
}
if (!inr) {
- *irq_base = 0;
- *nr_irqs = 0;
+ finfo->irq_base = 0;
+ finfo->nr_irqs = 0;
return 0;
}
@@ -1006,12 +1110,37 @@ static int parse_feature_irqs(struct build_feature_devs_info *binfo,
}
}
- *irq_base = ibase;
- *nr_irqs = inr;
+ finfo->irq_base = ibase;
+ finfo->nr_irqs = inr;
return 0;
}
+static int dfh_get_param_size(void __iomem *dfh_base, resource_size_t max)
+{
+ int size = 0;
+ u64 v, next;
+
+ if (!FIELD_GET(DFHv1_CSR_SIZE_GRP_HAS_PARAMS,
+ readq(dfh_base + DFHv1_CSR_SIZE_GRP)))
+ return 0;
+
+ while (size + DFHv1_PARAM_HDR < max) {
+ v = readq(dfh_base + DFHv1_PARAM_HDR + size);
+
+ next = FIELD_GET(DFHv1_PARAM_HDR_NEXT_OFFSET, v);
+ if (!next)
+ return -EINVAL;
+
+ size += next * sizeof(u64);
+
+ if (FIELD_GET(DFHv1_PARAM_HDR_NEXT_EOP, v))
+ return size;
+ }
+
+ return -ENOENT;
+}
+
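As a worked example of the walk above: a DFHv1 feature carrying only the MSI-X parameter block has one header word plus one data word, so DFHv1_PARAM_HDR_NEXT_OFFSET is 2 with the EOP bit set; dfh_get_param_size() then returns 16 bytes, and dfh_find_param() will later report (2 - 1) * 8 = 8 bytes of parameter data.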
/*
* when create sub feature instances, for private features, it doesn't need
* to provide resource size and feature id as they could be read from DFH
@@ -1023,39 +1152,69 @@ static int
create_feature_instance(struct build_feature_devs_info *binfo,
resource_size_t ofst, resource_size_t size, u16 fid)
{
- unsigned int irq_base, nr_irqs;
struct dfl_feature_info *finfo;
+ resource_size_t start, end;
+ int dfh_psize = 0;
u8 revision = 0;
+ u64 v, addr_off;
+ u8 dfh_ver = 0;
int ret;
- u64 v;
if (fid != FEATURE_ID_AFU) {
v = readq(binfo->ioaddr + ofst);
revision = FIELD_GET(DFH_REVISION, v);
-
+ dfh_ver = FIELD_GET(DFH_VERSION, v);
/* read feature size and id if inputs are invalid */
size = size ? size : feature_size(v);
fid = fid ? fid : feature_id(v);
+ if (dfh_ver == 1) {
+ dfh_psize = dfh_get_param_size(binfo->ioaddr + ofst, size);
+ if (dfh_psize < 0) {
+ dev_err(binfo->dev,
+ "failed to read size of DFHv1 parameters %d\n",
+ dfh_psize);
+ return dfh_psize;
+ }
+ dev_dbg(binfo->dev, "dfhv1_psize %d\n", dfh_psize);
+ }
}
if (binfo->len - ofst < size)
return -EINVAL;
- ret = parse_feature_irqs(binfo, ofst, fid, &irq_base, &nr_irqs);
- if (ret)
- return ret;
-
- finfo = kzalloc(sizeof(*finfo), GFP_KERNEL);
+ finfo = kzalloc(struct_size(finfo, params, dfh_psize / sizeof(u64)), GFP_KERNEL);
if (!finfo)
return -ENOMEM;
+ memcpy_fromio(finfo->params, binfo->ioaddr + ofst + DFHv1_PARAM_HDR, dfh_psize);
+ finfo->param_size = dfh_psize;
+
finfo->fid = fid;
finfo->revision = revision;
- finfo->mmio_res.start = binfo->start + ofst;
- finfo->mmio_res.end = finfo->mmio_res.start + size - 1;
+ finfo->dfh_version = dfh_ver;
+ if (dfh_ver == 1) {
+ v = readq(binfo->ioaddr + ofst + DFHv1_CSR_ADDR);
+ addr_off = FIELD_GET(DFHv1_CSR_ADDR_MASK, v);
+ if (FIELD_GET(DFHv1_CSR_ADDR_REL, v))
+ start = addr_off << 1;
+ else
+ start = binfo->start + ofst + addr_off;
+
+ v = readq(binfo->ioaddr + ofst + DFHv1_CSR_SIZE_GRP);
+ end = start + FIELD_GET(DFHv1_CSR_SIZE_GRP_SIZE, v) - 1;
+ } else {
+ start = binfo->start + ofst;
+ end = start + size - 1;
+ }
finfo->mmio_res.flags = IORESOURCE_MEM;
- finfo->irq_base = irq_base;
- finfo->nr_irqs = nr_irqs;
+ finfo->mmio_res.start = start;
+ finfo->mmio_res.end = end;
+
+ ret = parse_feature_irqs(binfo, ofst, finfo);
+ if (ret) {
+ kfree(finfo);
+ return ret;
+ }
list_add_tail(&finfo->node, &binfo->sub_features);
binfo->feature_num++;
@@ -1074,7 +1233,7 @@ static int parse_feature_port_afu(struct build_feature_devs_info *binfo,
return create_feature_instance(binfo, ofst, size, FEATURE_ID_AFU);
}
-#define is_feature_dev_detected(binfo) (!!(binfo)->feature_dev)
+#define is_feature_dev_detected(binfo) ((binfo)->type != DFL_ID_MAX)
static int parse_feature_afu(struct build_feature_devs_info *binfo,
resource_size_t ofst)
@@ -1084,12 +1243,11 @@ static int parse_feature_afu(struct build_feature_devs_info *binfo,
return -EINVAL;
}
- switch (feature_dev_id_type(binfo->feature_dev)) {
+ switch (binfo->type) {
case PORT_ID:
return parse_feature_port_afu(binfo, ofst);
default:
- dev_info(binfo->dev, "AFU belonging to FIU %s is not supported yet.\n",
- binfo->feature_dev->name);
+ dev_info(binfo->dev, "AFU belonging to FIU is not supported yet.\n");
}
return 0;
@@ -1130,6 +1288,7 @@ static void build_info_complete(struct build_feature_devs_info *binfo)
static int parse_feature_fiu(struct build_feature_devs_info *binfo,
resource_size_t ofst)
{
+ enum dfl_id_type type;
int ret = 0;
u32 offset;
u16 id;
@@ -1151,10 +1310,13 @@ static int parse_feature_fiu(struct build_feature_devs_info *binfo,
v = readq(binfo->ioaddr + DFH);
id = FIELD_GET(DFH_ID, v);
- /* create platform device for dfl feature dev */
- ret = build_info_create_dev(binfo, dfh_id_to_type(id));
- if (ret)
- return ret;
+ type = dfh_id_to_type(id);
+ if (type >= DFL_ID_MAX)
+ return -EINVAL;
+
+ binfo->type = type;
+ binfo->feature_num = 0;
+ INIT_LIST_HEAD(&binfo->sub_features);
ret = create_feature_instance(binfo, 0, 0, 0);
if (ret)
@@ -1372,13 +1534,9 @@ EXPORT_SYMBOL_GPL(dfl_fpga_enum_info_add_irq);
static int remove_feature_dev(struct device *dev, void *data)
{
- struct platform_device *pdev = to_platform_device(dev);
- enum dfl_id_type type = feature_dev_id_type(pdev);
- int id = pdev->id;
-
- platform_device_unregister(pdev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
- dfl_id_free(type, id);
+ feature_dev_unregister(fdata);
return 0;
}
@@ -1430,6 +1588,7 @@ dfl_fpga_feature_devs_enumerate(struct dfl_fpga_enum_info *info)
goto unregister_region_exit;
}
+ binfo->type = DFL_ID_MAX;
binfo->dev = info->dev;
binfo->cdev = cdev;
@@ -1471,25 +1630,10 @@ EXPORT_SYMBOL_GPL(dfl_fpga_feature_devs_enumerate);
*/
void dfl_fpga_feature_devs_remove(struct dfl_fpga_cdev *cdev)
{
- struct dfl_feature_platform_data *pdata, *ptmp;
-
mutex_lock(&cdev->lock);
if (cdev->fme_dev)
put_device(cdev->fme_dev);
- list_for_each_entry_safe(pdata, ptmp, &cdev->port_dev_list, node) {
- struct platform_device *port_dev = pdata->dev;
-
- /* remove released ports */
- if (!device_is_registered(&port_dev->dev)) {
- dfl_id_free(feature_dev_id_type(port_dev),
- port_dev->id);
- platform_device_put(port_dev);
- }
-
- list_del(&pdata->node);
- put_device(&port_dev->dev);
- }
mutex_unlock(&cdev->lock);
remove_feature_devs(cdev);
@@ -1500,7 +1644,7 @@ void dfl_fpga_feature_devs_remove(struct dfl_fpga_cdev *cdev)
EXPORT_SYMBOL_GPL(dfl_fpga_feature_devs_remove);
/**
- * __dfl_fpga_cdev_find_port - find a port under given container device
+ * __dfl_fpga_cdev_find_port_data - find a port under given container device
*
* @cdev: container device
* @data: data passed to match function
@@ -1513,23 +1657,20 @@ EXPORT_SYMBOL_GPL(dfl_fpga_feature_devs_remove);
*
- * NOTE: you will need to drop the device reference with put_device() after use.
+ * NOTE: unlike the former __dfl_fpga_cdev_find_port(), no device reference is
+ * taken, so there is no put_device() needed after use.
*/
-struct platform_device *
-__dfl_fpga_cdev_find_port(struct dfl_fpga_cdev *cdev, void *data,
- int (*match)(struct platform_device *, void *))
+struct dfl_feature_dev_data *
+__dfl_fpga_cdev_find_port_data(struct dfl_fpga_cdev *cdev, void *data,
+ int (*match)(struct dfl_feature_dev_data *, void *))
{
- struct dfl_feature_platform_data *pdata;
- struct platform_device *port_dev;
-
- list_for_each_entry(pdata, &cdev->port_dev_list, node) {
- port_dev = pdata->dev;
+ struct dfl_feature_dev_data *fdata;
- if (match(port_dev, data) && get_device(&port_dev->dev))
- return port_dev;
+ list_for_each_entry(fdata, &cdev->port_dev_list, node) {
+ if (match(fdata, data))
+ return fdata;
}
return NULL;
}
-EXPORT_SYMBOL_GPL(__dfl_fpga_cdev_find_port);
+EXPORT_SYMBOL_GPL(__dfl_fpga_cdev_find_port_data);
static int __init dfl_fpga_init(void)
{
@@ -1563,33 +1704,28 @@ static int __init dfl_fpga_init(void)
*/
int dfl_fpga_cdev_release_port(struct dfl_fpga_cdev *cdev, int port_id)
{
- struct dfl_feature_platform_data *pdata;
- struct platform_device *port_pdev;
+ struct dfl_feature_dev_data *fdata;
int ret = -ENODEV;
mutex_lock(&cdev->lock);
- port_pdev = __dfl_fpga_cdev_find_port(cdev, &port_id,
- dfl_fpga_check_port_id);
- if (!port_pdev)
+ fdata = __dfl_fpga_cdev_find_port_data(cdev, &port_id,
+ dfl_fpga_check_port_id);
+ if (!fdata)
goto unlock_exit;
- if (!device_is_registered(&port_pdev->dev)) {
+ if (!fdata->dev) {
ret = -EBUSY;
- goto put_dev_exit;
+ goto unlock_exit;
}
- pdata = dev_get_platdata(&port_pdev->dev);
-
- mutex_lock(&pdata->lock);
- ret = dfl_feature_dev_use_begin(pdata, true);
- mutex_unlock(&pdata->lock);
+ mutex_lock(&fdata->lock);
+ ret = dfl_feature_dev_use_begin(fdata, true);
+ mutex_unlock(&fdata->lock);
if (ret)
- goto put_dev_exit;
+ goto unlock_exit;
- platform_device_del(port_pdev);
+ feature_dev_unregister(fdata);
cdev->released_port_num++;
-put_dev_exit:
- put_device(&port_pdev->dev);
unlock_exit:
mutex_unlock(&cdev->lock);
return ret;
@@ -1609,34 +1745,29 @@ EXPORT_SYMBOL_GPL(dfl_fpga_cdev_release_port);
*/
int dfl_fpga_cdev_assign_port(struct dfl_fpga_cdev *cdev, int port_id)
{
- struct dfl_feature_platform_data *pdata;
- struct platform_device *port_pdev;
+ struct dfl_feature_dev_data *fdata;
int ret = -ENODEV;
mutex_lock(&cdev->lock);
- port_pdev = __dfl_fpga_cdev_find_port(cdev, &port_id,
- dfl_fpga_check_port_id);
- if (!port_pdev)
+ fdata = __dfl_fpga_cdev_find_port_data(cdev, &port_id,
+ dfl_fpga_check_port_id);
+ if (!fdata)
goto unlock_exit;
- if (device_is_registered(&port_pdev->dev)) {
+ if (fdata->dev) {
ret = -EBUSY;
- goto put_dev_exit;
+ goto unlock_exit;
}
- ret = platform_device_add(port_pdev);
+ ret = feature_dev_register(fdata);
if (ret)
- goto put_dev_exit;
-
- pdata = dev_get_platdata(&port_pdev->dev);
+ goto unlock_exit;
- mutex_lock(&pdata->lock);
- dfl_feature_dev_use_end(pdata);
- mutex_unlock(&pdata->lock);
+ mutex_lock(&fdata->lock);
+ dfl_feature_dev_use_end(fdata);
+ mutex_unlock(&fdata->lock);
cdev->released_port_num--;
-put_dev_exit:
- put_device(&port_pdev->dev);
unlock_exit:
mutex_unlock(&cdev->lock);
return ret;
@@ -1646,10 +1777,11 @@ EXPORT_SYMBOL_GPL(dfl_fpga_cdev_assign_port);
static void config_port_access_mode(struct device *fme_dev, int port_id,
bool is_vf)
{
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(fme_dev);
void __iomem *base;
u64 v;
- base = dfl_get_feature_ioaddr_by_id(fme_dev, FME_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_HEADER);
v = readq(base + FME_HDR_PORT_OFST(port_id));
@@ -1673,14 +1805,14 @@ static void config_port_access_mode(struct device *fme_dev, int port_id,
*/
void dfl_fpga_cdev_config_ports_pf(struct dfl_fpga_cdev *cdev)
{
- struct dfl_feature_platform_data *pdata;
+ struct dfl_feature_dev_data *fdata;
mutex_lock(&cdev->lock);
- list_for_each_entry(pdata, &cdev->port_dev_list, node) {
- if (device_is_registered(&pdata->dev->dev))
+ list_for_each_entry(fdata, &cdev->port_dev_list, node) {
+ if (fdata->dev)
continue;
- config_port_pf_mode(cdev->fme_dev, pdata->id);
+ config_port_pf_mode(cdev->fme_dev, fdata->id);
}
mutex_unlock(&cdev->lock);
}
@@ -1699,7 +1831,7 @@ EXPORT_SYMBOL_GPL(dfl_fpga_cdev_config_ports_pf);
*/
int dfl_fpga_cdev_config_ports_vf(struct dfl_fpga_cdev *cdev, int num_vfs)
{
- struct dfl_feature_platform_data *pdata;
+ struct dfl_feature_dev_data *fdata;
int ret = 0;
mutex_lock(&cdev->lock);
@@ -1713,11 +1845,11 @@ int dfl_fpga_cdev_config_ports_vf(struct dfl_fpga_cdev *cdev, int num_vfs)
goto done;
}
- list_for_each_entry(pdata, &cdev->port_dev_list, node) {
- if (device_is_registered(&pdata->dev->dev))
+ list_for_each_entry(fdata, &cdev->port_dev_list, node) {
+ if (fdata->dev)
continue;
- config_port_vf_mode(cdev->fme_dev, pdata->id);
+ config_port_vf_mode(cdev->fme_dev, fdata->id);
}
done:
mutex_unlock(&cdev->lock);
@@ -1729,7 +1861,7 @@ static irqreturn_t dfl_irq_handler(int irq, void *arg)
{
struct eventfd_ctx *trigger = arg;
- eventfd_signal(trigger, 1);
+ eventfd_signal(trigger);
return IRQ_HANDLED;
}
@@ -1850,7 +1982,7 @@ long dfl_feature_ioctl_set_irq(struct platform_device *pdev,
struct dfl_feature *feature,
unsigned long arg)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
struct dfl_fpga_irq_set hdr;
s32 *fds;
long ret;
@@ -1865,14 +1997,14 @@ long dfl_feature_ioctl_set_irq(struct platform_device *pdev,
(hdr.start + hdr.count < hdr.start))
return -EINVAL;
- fds = memdup_user((void __user *)(arg + sizeof(hdr)),
- array_size(hdr.count, sizeof(s32)));
+ fds = memdup_array_user((void __user *)(arg + sizeof(hdr)),
+ hdr.count, sizeof(s32));
if (IS_ERR(fds))
return PTR_ERR(fds);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
ret = dfl_fpga_set_irq_triggers(feature, hdr.start, hdr.count, fds);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
kfree(fds);
return ret;
diff --git a/drivers/fpga/dfl.h b/drivers/fpga/dfl.h
index 06cfcd5e84bb..95539f1213cb 100644
--- a/drivers/fpga/dfl.h
+++ b/drivers/fpga/dfl.h
@@ -17,6 +17,7 @@
#include <linux/bitfield.h>
#include <linux/cdev.h>
#include <linux/delay.h>
+#include <linux/dfl.h>
#include <linux/eventfd.h>
#include <linux/fs.h>
#include <linux/interrupt.h>
@@ -74,11 +75,47 @@
#define DFH_REVISION GENMASK_ULL(15, 12) /* Feature revision */
#define DFH_NEXT_HDR_OFST GENMASK_ULL(39, 16) /* Offset to next DFH */
#define DFH_EOL BIT_ULL(40) /* End of list */
+#define DFH_VERSION GENMASK_ULL(59, 52) /* DFH version */
#define DFH_TYPE GENMASK_ULL(63, 60) /* Feature type */
#define DFH_TYPE_AFU 1
#define DFH_TYPE_PRIVATE 3
#define DFH_TYPE_FIU 4
+/*
+ * DFHv1 Register Offset definitions
+ * In DFHv1, DFH + GUID + CSR_START + CSR_SIZE_GROUP + PARAM_HDR + PARAM_DATA
+ * serve as the common header registers
+ */
+#define DFHv1_CSR_ADDR 0x18 /* CSR Register start address */
+#define DFHv1_CSR_SIZE_GRP 0x20 /* Size of Reg Block and Group/tag */
+#define DFHv1_PARAM_HDR 0x28 /* Optional First Param header */
+
+/*
+ * CSR Rel Bit, 1'b0 = relative (offset from feature DFH start),
+ * 1'b1 = absolute (ARM or other non-PCIe use)
+ */
+#define DFHv1_CSR_ADDR_REL BIT_ULL(0)
+
+/* CSR Header Register Bit Definitions */
+#define DFHv1_CSR_ADDR_MASK GENMASK_ULL(63, 1) /* 63:1 of CSR address */
+
+/* CSR SIZE Group Register Bit Definitions */
+#define DFHv1_CSR_SIZE_GRP_INSTANCE_ID GENMASK_ULL(15, 0) /* Enumeration instantiated IP */
+#define DFHv1_CSR_SIZE_GRP_GROUPING_ID GENMASK_ULL(30, 16) /* Group Features/interfaces */
+#define DFHv1_CSR_SIZE_GRP_HAS_PARAMS BIT_ULL(31) /* Presence of Parameters */
+#define DFHv1_CSR_SIZE_GRP_SIZE GENMASK_ULL(63, 32) /* Size of CSR Block in bytes */
+
+/* PARAM Header Register Bit Definitions */
+#define DFHv1_PARAM_HDR_ID GENMASK_ULL(15, 0) /* Id of this Param */
+#define DFHv1_PARAM_HDR_VER GENMASK_ULL(31, 16) /* Version Param */
+#define DFHv1_PARAM_HDR_NEXT_OFFSET GENMASK_ULL(63, 35) /* Offset of next Param */
+#define DFHv1_PARAM_HDR_NEXT_EOP BIT_ULL(32)
+#define DFHv1_PARAM_DATA 0x08 /* Offset of Param data from Param header */
+
+#define DFHv1_PARAM_ID_MSI_X 0x1
+#define DFHv1_PARAM_MSI_X_NUMV GENMASK_ULL(63, 32)
+#define DFHv1_PARAM_MSI_X_STARTV GENMASK_ULL(31, 0)
+
/* Next AFU Register Bitfield */
#define NEXT_AFU_NEXT_DFH_OFST GENMASK_ULL(23, 0) /* Offset to next AFU */
@@ -170,6 +207,8 @@
#define PORT_UINT_CAP_INT_NUM GENMASK_ULL(11, 0) /* Interrupts num */
#define PORT_UINT_CAP_FST_VECT GENMASK_ULL(23, 12) /* First Vector */
+struct dfl_feature_dev_data;
+
/**
* struct dfl_fpga_port_ops - port ops
*
@@ -183,15 +222,15 @@ struct dfl_fpga_port_ops {
const char *name;
struct module *owner;
struct list_head node;
- int (*get_id)(struct platform_device *pdev);
- int (*enable_set)(struct platform_device *pdev, bool enable);
+ int (*get_id)(struct dfl_feature_dev_data *fdata);
+ int (*enable_set)(struct dfl_feature_dev_data *fdata, bool enable);
};
void dfl_fpga_port_ops_add(struct dfl_fpga_port_ops *ops);
void dfl_fpga_port_ops_del(struct dfl_fpga_port_ops *ops);
-struct dfl_fpga_port_ops *dfl_fpga_port_ops_get(struct platform_device *pdev);
+struct dfl_fpga_port_ops *dfl_fpga_port_ops_get(struct dfl_feature_dev_data *fdata);
void dfl_fpga_port_ops_put(struct dfl_fpga_port_ops *ops);
-int dfl_fpga_check_port_id(struct platform_device *pdev, void *pport_id);
+int dfl_fpga_check_port_id(struct dfl_feature_dev_data *fdata, void *pport_id);
/**
* struct dfl_feature_id - dfl private feature id
@@ -231,6 +270,7 @@ struct dfl_feature_irq_ctx {
*
* @dev: ptr to pdev of the feature device which has the sub feature.
* @id: sub feature id.
+ * @revision: revision of this sub feature.
* @resource_index: each sub feature has one mmio resource for its registers.
* this index is used to find its mmio resource from the
* feature dev (platform device)'s resources.
@@ -240,6 +280,9 @@ struct dfl_feature_irq_ctx {
* @ops: ops of this sub feature.
* @ddev: ptr to the dfl device of this sub feature.
* @priv: priv data of this feature.
+ * @dfh_version: version of the DFH.
+ * @param_size: size of the DFH parameters.
+ * @params: pointer to a memory copy of the DFH parameters.
*/
struct dfl_feature {
struct platform_device *dev;
@@ -252,31 +295,40 @@ struct dfl_feature {
const struct dfl_feature_ops *ops;
struct dfl_device *ddev;
void *priv;
+ u8 dfh_version;
+ unsigned int param_size;
+ void *params;
};
#define FEATURE_DEV_ID_UNUSED (-1)
/**
- * struct dfl_feature_platform_data - platform data for feature devices
+ * struct dfl_feature_dev_data - dfl enumeration data for dfl feature dev.
*
- * @node: node to link feature devs to container device's port_dev_list.
- * @lock: mutex to protect platform data.
- * @cdev: cdev of feature dev.
- * @dev: ptr to platform device linked with this platform data.
+ * @node: node to link the data structure to container device's port_dev_list.
+ * @lock: mutex to protect feature dev data.
+ * @dev: ptr to the feature's platform device linked with this structure.
+ * @type: type of DFL FIU for the feature dev. See enum dfl_id_type.
+ * @pdev_id: platform device id for the feature dev.
+ * @pdev_name: platform device name for the feature dev.
* @dfl_cdev: ptr to container device.
- * @id: id used for this feature device.
+ * @id: id used for the feature device.
* @disable_count: count for port disable.
* @excl_open: set on feature device exclusive open.
* @open_count: count for feature device open.
* @num: number for sub features.
* @private: ptr to feature dev private data.
- * @features: sub features of this feature dev.
+ * @features: sub features for the feature dev.
+ * @resource_num: number of resources for the feature dev.
+ * @resources: resources for the feature dev.
*/
-struct dfl_feature_platform_data {
+struct dfl_feature_dev_data {
struct list_head node;
struct mutex lock;
- struct cdev cdev;
struct platform_device *dev;
+ enum dfl_id_type type;
+ int pdev_id;
+ const char *pdev_name;
struct dfl_fpga_cdev *dfl_cdev;
int id;
unsigned int disable_count;
@@ -284,55 +336,68 @@ struct dfl_feature_platform_data {
int open_count;
void *private;
int num;
- struct dfl_feature features[];
+ struct dfl_feature *features;
+ int resource_num;
+ struct resource *resources;
+};
+
+/**
+ * struct dfl_feature_platform_data - platform data for feature devices
+ *
+ * @cdev: cdev of feature dev.
+ * @fdata: dfl enumeration data for the dfl feature device.
+ */
+struct dfl_feature_platform_data {
+ struct cdev cdev;
+ struct dfl_feature_dev_data *fdata;
};
static inline
-int dfl_feature_dev_use_begin(struct dfl_feature_platform_data *pdata,
+int dfl_feature_dev_use_begin(struct dfl_feature_dev_data *fdata,
bool excl)
{
- if (pdata->excl_open)
+ if (fdata->excl_open)
return -EBUSY;
if (excl) {
- if (pdata->open_count)
+ if (fdata->open_count)
return -EBUSY;
- pdata->excl_open = true;
+ fdata->excl_open = true;
}
- pdata->open_count++;
+ fdata->open_count++;
return 0;
}
static inline
-void dfl_feature_dev_use_end(struct dfl_feature_platform_data *pdata)
+void dfl_feature_dev_use_end(struct dfl_feature_dev_data *fdata)
{
- pdata->excl_open = false;
+ fdata->excl_open = false;
- if (WARN_ON(pdata->open_count <= 0))
+ if (WARN_ON(fdata->open_count <= 0))
return;
- pdata->open_count--;
+ fdata->open_count--;
}
static inline
-int dfl_feature_dev_use_count(struct dfl_feature_platform_data *pdata)
+int dfl_feature_dev_use_count(struct dfl_feature_dev_data *fdata)
{
- return pdata->open_count;
+ return fdata->open_count;
}
static inline
-void dfl_fpga_pdata_set_private(struct dfl_feature_platform_data *pdata,
+void dfl_fpga_fdata_set_private(struct dfl_feature_dev_data *fdata,
void *private)
{
- pdata->private = private;
+ fdata->private = private;
}
static inline
-void *dfl_fpga_pdata_get_private(struct dfl_feature_platform_data *pdata)
+void *dfl_fpga_fdata_get_private(struct dfl_feature_dev_data *fdata)
{
- return pdata->private;
+ return fdata->private;
}
struct dfl_feature_ops {
@@ -355,37 +420,36 @@ int dfl_fpga_dev_ops_register(struct platform_device *pdev,
struct module *owner);
void dfl_fpga_dev_ops_unregister(struct platform_device *pdev);
-static inline
-struct platform_device *dfl_fpga_inode_to_feature_dev(struct inode *inode)
+static inline struct dfl_feature_dev_data *
+dfl_fpga_inode_to_feature_dev_data(struct inode *inode)
{
struct dfl_feature_platform_data *pdata;
pdata = container_of(inode->i_cdev, struct dfl_feature_platform_data,
cdev);
- return pdata->dev;
+ return pdata->fdata;
}
-#define dfl_fpga_dev_for_each_feature(pdata, feature) \
- for ((feature) = (pdata)->features; \
- (feature) < (pdata)->features + (pdata)->num; (feature)++)
+#define dfl_fpga_dev_for_each_feature(fdata, feature) \
+ for ((feature) = (fdata)->features; \
+ (feature) < (fdata)->features + (fdata)->num; (feature)++)
-static inline
-struct dfl_feature *dfl_get_feature_by_id(struct device *dev, u16 id)
+static inline struct dfl_feature *
+dfl_get_feature_by_id(struct dfl_feature_dev_data *fdata, u16 id)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
struct dfl_feature *feature;
- dfl_fpga_dev_for_each_feature(pdata, feature)
+ dfl_fpga_dev_for_each_feature(fdata, feature)
if (feature->id == id)
return feature;
return NULL;
}
-static inline
-void __iomem *dfl_get_feature_ioaddr_by_id(struct device *dev, u16 id)
+static inline void __iomem *
+dfl_get_feature_ioaddr_by_id(struct dfl_feature_dev_data *fdata, u16 id)
{
- struct dfl_feature *feature = dfl_get_feature_by_id(dev, id);
+ struct dfl_feature *feature = dfl_get_feature_by_id(fdata, id);
if (feature && feature->ioaddr)
return feature->ioaddr;
@@ -394,15 +458,18 @@ void __iomem *dfl_get_feature_ioaddr_by_id(struct device *dev, u16 id)
return NULL;
}
-static inline bool is_dfl_feature_present(struct device *dev, u16 id)
+static inline struct dfl_feature_dev_data *
+to_dfl_feature_dev_data(struct device *dev)
{
- return !!dfl_get_feature_ioaddr_by_id(dev, id);
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+
+ return pdata->fdata;
}
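A hypothetical sub-feature init, sketching how a driver reaches its enumeration data after the pdata-to-fdata conversion (the function name is illustrative; FME_FEATURE_ID_HEADER is an existing id from this header):

#include <linux/platform_device.h>
#include "dfl.h"

static int example_feature_init(struct platform_device *pdev,
				struct dfl_feature *feature)
{
	struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
	void __iomem *base;

	/* Header register block of this FME feature device. */
	base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_HEADER);
	if (!base)
		return -ENODEV;

	mutex_lock(&fdata->lock);
	/* ... program header registers, stash driver state with
	 *     dfl_fpga_fdata_set_private(fdata, priv), etc. ... */
	mutex_unlock(&fdata->lock);

	return 0;
}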
static inline
-struct device *dfl_fpga_pdata_to_parent(struct dfl_feature_platform_data *pdata)
+struct device *dfl_fpga_fdata_to_parent(struct dfl_feature_dev_data *fdata)
{
- return pdata->dev->dev.parent->parent;
+ return fdata->dev->dev.parent->parent;
}
static inline bool dfl_feature_is_fme(void __iomem *base)
@@ -484,26 +551,21 @@ struct dfl_fpga_cdev *
dfl_fpga_feature_devs_enumerate(struct dfl_fpga_enum_info *info);
void dfl_fpga_feature_devs_remove(struct dfl_fpga_cdev *cdev);
-/*
- * need to drop the device reference with put_device() after use port platform
- * device returned by __dfl_fpga_cdev_find_port and dfl_fpga_cdev_find_port
- * functions.
- */
-struct platform_device *
-__dfl_fpga_cdev_find_port(struct dfl_fpga_cdev *cdev, void *data,
- int (*match)(struct platform_device *, void *));
+struct dfl_feature_dev_data *
+__dfl_fpga_cdev_find_port_data(struct dfl_fpga_cdev *cdev, void *data,
+ int (*match)(struct dfl_feature_dev_data *, void *));
-static inline struct platform_device *
-dfl_fpga_cdev_find_port(struct dfl_fpga_cdev *cdev, void *data,
- int (*match)(struct platform_device *, void *))
+static inline struct dfl_feature_dev_data *
+dfl_fpga_cdev_find_port_data(struct dfl_fpga_cdev *cdev, void *data,
+ int (*match)(struct dfl_feature_dev_data *, void *))
{
- struct platform_device *pdev;
+ struct dfl_feature_dev_data *fdata;
mutex_lock(&cdev->lock);
- pdev = __dfl_fpga_cdev_find_port(cdev, data, match);
+ fdata = __dfl_fpga_cdev_find_port_data(cdev, data, match);
mutex_unlock(&cdev->lock);
- return pdev;
+ return fdata;
}
int dfl_fpga_cdev_release_port(struct dfl_fpga_cdev *cdev, int port_id);
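A short hypothetical lookup against the renamed port helper (find_port0() is illustrative); note that, unlike the old __dfl_fpga_cdev_find_port(), no device reference is returned, so there is nothing to put afterwards:

/* Sketch: fetch the enumeration data for port 0 of a container device. */
static struct dfl_feature_dev_data *find_port0(struct dfl_fpga_cdev *cdev)
{
	int port_id = 0;

	/* dfl_fpga_cdev_find_port_data() takes cdev->lock internally. */
	return dfl_fpga_cdev_find_port_data(cdev, &port_id,
					    dfl_fpga_check_port_id);
}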
diff --git a/drivers/fpga/fpga-bridge.c b/drivers/fpga/fpga-bridge.c
index 727704431f61..8ef395b49bf8 100644
--- a/drivers/fpga/fpga-bridge.c
+++ b/drivers/fpga/fpga-bridge.c
@@ -14,7 +14,7 @@
#include <linux/spinlock.h>
static DEFINE_IDA(fpga_bridge_ida);
-static struct class *fpga_bridge_class;
+static const struct class fpga_bridge_class;
/* Lock for adding/removing bridges to linked lists*/
static DEFINE_SPINLOCK(bridge_list_lock);
@@ -30,7 +30,7 @@ int fpga_bridge_enable(struct fpga_bridge *bridge)
{
dev_dbg(&bridge->dev, "enable\n");
- if (bridge->br_ops && bridge->br_ops->enable_set)
+ if (bridge->br_ops->enable_set)
return bridge->br_ops->enable_set(bridge, 1);
return 0;
@@ -48,62 +48,61 @@ int fpga_bridge_disable(struct fpga_bridge *bridge)
{
dev_dbg(&bridge->dev, "disable\n");
- if (bridge->br_ops && bridge->br_ops->enable_set)
+ if (bridge->br_ops->enable_set)
return bridge->br_ops->enable_set(bridge, 0);
return 0;
}
EXPORT_SYMBOL_GPL(fpga_bridge_disable);
-static struct fpga_bridge *__fpga_bridge_get(struct device *dev,
+static struct fpga_bridge *__fpga_bridge_get(struct device *bridge_dev,
struct fpga_image_info *info)
{
struct fpga_bridge *bridge;
- int ret = -ENODEV;
- bridge = to_fpga_bridge(dev);
+ bridge = to_fpga_bridge(bridge_dev);
bridge->info = info;
- if (!mutex_trylock(&bridge->mutex)) {
- ret = -EBUSY;
- goto err_dev;
- }
+ if (!mutex_trylock(&bridge->mutex))
+ return ERR_PTR(-EBUSY);
- if (!try_module_get(dev->parent->driver->owner))
- goto err_ll_mod;
+ if (!try_module_get(bridge->br_ops_owner)) {
+ mutex_unlock(&bridge->mutex);
+ return ERR_PTR(-ENODEV);
+ }
dev_dbg(&bridge->dev, "get\n");
return bridge;
-
-err_ll_mod:
- mutex_unlock(&bridge->mutex);
-err_dev:
- put_device(dev);
- return ERR_PTR(ret);
}
/**
* of_fpga_bridge_get - get an exclusive reference to an fpga bridge
*
- * @np: node pointer of an FPGA bridge
- * @info: fpga image specific information
+ * @np: node pointer of an FPGA bridge.
+ * @info: fpga image specific information.
*
- * Return fpga_bridge struct if successful.
- * Return -EBUSY if someone already has a reference to the bridge.
- * Return -ENODEV if @np is not an FPGA Bridge.
+ * Return:
+ * * fpga_bridge struct pointer if successful.
+ * * -EBUSY if someone already has a reference to the bridge.
+ * * -ENODEV if @np is not an FPGA Bridge or the parent driver refcount can't be taken.
*/
struct fpga_bridge *of_fpga_bridge_get(struct device_node *np,
struct fpga_image_info *info)
{
- struct device *dev;
+ struct fpga_bridge *bridge;
+ struct device *bridge_dev;
- dev = class_find_device_by_of_node(fpga_bridge_class, np);
- if (!dev)
+ bridge_dev = class_find_device_by_of_node(&fpga_bridge_class, np);
+ if (!bridge_dev)
return ERR_PTR(-ENODEV);
- return __fpga_bridge_get(dev, info);
+ bridge = __fpga_bridge_get(bridge_dev, info);
+ if (IS_ERR(bridge))
+ put_device(bridge_dev);
+
+ return bridge;
}
EXPORT_SYMBOL_GPL(of_fpga_bridge_get);
@@ -115,7 +114,7 @@ static int fpga_bridge_dev_match(struct device *dev, const void *data)
/**
* fpga_bridge_get - get an exclusive reference to an fpga bridge
* @dev: parent device that fpga bridge was registered with
- * @info: fpga manager info
+ * @info: fpga image specific information
*
* Given a device, get an exclusive reference to an fpga bridge.
*
@@ -124,14 +123,19 @@ static int fpga_bridge_dev_match(struct device *dev, const void *data)
struct fpga_bridge *fpga_bridge_get(struct device *dev,
struct fpga_image_info *info)
{
+ struct fpga_bridge *bridge;
struct device *bridge_dev;
- bridge_dev = class_find_device(fpga_bridge_class, NULL, dev,
+ bridge_dev = class_find_device(&fpga_bridge_class, NULL, dev,
fpga_bridge_dev_match);
if (!bridge_dev)
return ERR_PTR(-ENODEV);
- return __fpga_bridge_get(bridge_dev, info);
+ bridge = __fpga_bridge_get(bridge_dev, info);
+ if (IS_ERR(bridge))
+ put_device(bridge_dev);
+
+ return bridge;
}
EXPORT_SYMBOL_GPL(fpga_bridge_get);
@@ -145,7 +149,7 @@ void fpga_bridge_put(struct fpga_bridge *bridge)
dev_dbg(&bridge->dev, "put\n");
bridge->info = NULL;
- module_put(bridge->dev.parent->driver->owner);
+ module_put(bridge->br_ops_owner);
mutex_unlock(&bridge->mutex);
put_device(&bridge->dev);
}
@@ -155,9 +159,9 @@ EXPORT_SYMBOL_GPL(fpga_bridge_put);
* fpga_bridges_enable - enable bridges in a list
* @bridge_list: list of FPGA bridges
*
- * Enable each bridge in the list. If list is empty, do nothing.
+ * Enable each bridge in the list. If list is empty, do nothing.
*
- * Return 0 for success or empty bridge list; return error code otherwise.
+ * Return: 0 for success or an empty bridge list; an error code otherwise.
*/
int fpga_bridges_enable(struct list_head *bridge_list)
{
@@ -179,9 +183,9 @@ EXPORT_SYMBOL_GPL(fpga_bridges_enable);
*
* @bridge_list: list of FPGA bridges
*
- * Disable each bridge in the list. If list is empty, do nothing.
+ * Disable each bridge in the list. If list is empty, do nothing.
*
- * Return 0 for success or empty bridge list; return error code otherwise.
+ * Return: 0 for success or an empty bridge list; an error code otherwise.
*/
int fpga_bridges_disable(struct list_head *bridge_list)
{
@@ -230,7 +234,7 @@ EXPORT_SYMBOL_GPL(fpga_bridges_put);
*
 * Get an exclusive reference to the bridge and add it to the list.
*
- * Return 0 for success, error code from of_fpga_bridge_get() otherwise.
+ * Return: 0 for success, error code from of_fpga_bridge_get() otherwise.
*/
int of_fpga_bridge_get_to_list(struct device_node *np,
struct fpga_image_info *info,
@@ -260,7 +264,7 @@ EXPORT_SYMBOL_GPL(of_fpga_bridge_get_to_list);
*
 * Get an exclusive reference to the bridge and add it to the list.
*
- * Return 0 for success, error code from fpga_bridge_get() otherwise.
+ * Return: 0 for success, error code from fpga_bridge_get() otherwise.
*/
int fpga_bridge_get_to_list(struct device *dev,
struct fpga_image_info *info,
@@ -293,12 +297,15 @@ static ssize_t state_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct fpga_bridge *bridge = to_fpga_bridge(dev);
- int enable = 1;
+ int state = 1;
- if (bridge->br_ops && bridge->br_ops->enable_show)
- enable = bridge->br_ops->enable_show(bridge);
+ if (bridge->br_ops->enable_show) {
+ state = bridge->br_ops->enable_show(bridge);
+ if (state < 0)
+ return state;
+ }
- return sprintf(buf, "%s\n", enable ? "enabled" : "disabled");
+ return sysfs_emit(buf, "%s\n", state ? "enabled" : "disabled");
}
static DEVICE_ATTR_RO(name);
@@ -312,18 +319,19 @@ static struct attribute *fpga_bridge_attrs[] = {
ATTRIBUTE_GROUPS(fpga_bridge);
/**
- * fpga_bridge_register - create and register an FPGA Bridge device
+ * __fpga_bridge_register - create and register an FPGA Bridge device
* @parent: FPGA bridge device from pdev
* @name: FPGA bridge name
* @br_ops: pointer to structure of fpga bridge ops
* @priv: FPGA bridge private data
+ * @owner: owner module containing the br_ops
*
* Return: struct fpga_bridge pointer or ERR_PTR()
*/
struct fpga_bridge *
-fpga_bridge_register(struct device *parent, const char *name,
- const struct fpga_bridge_ops *br_ops,
- void *priv)
+__fpga_bridge_register(struct device *parent, const char *name,
+ const struct fpga_bridge_ops *br_ops,
+ void *priv, struct module *owner)
{
struct fpga_bridge *bridge;
int id, ret;
@@ -353,14 +361,14 @@ fpga_bridge_register(struct device *parent, const char *name,
bridge->name = name;
bridge->br_ops = br_ops;
+ bridge->br_ops_owner = owner;
bridge->priv = priv;
bridge->dev.groups = br_ops->groups;
- bridge->dev.class = fpga_bridge_class;
+ bridge->dev.class = &fpga_bridge_class;
bridge->dev.parent = parent;
bridge->dev.of_node = parent->of_node;
bridge->dev.id = id;
- of_platform_populate(bridge->dev.of_node, NULL, NULL, &bridge->dev);
ret = dev_set_name(&bridge->dev, "br%d", id);
if (ret)
@@ -372,6 +380,8 @@ fpga_bridge_register(struct device *parent, const char *name,
return ERR_PTR(ret);
}
+ of_platform_populate(bridge->dev.of_node, NULL, NULL, &bridge->dev);
+
return bridge;
error_device:
@@ -381,7 +391,7 @@ error_kfree:
return ERR_PTR(ret);
}
-EXPORT_SYMBOL_GPL(fpga_bridge_register);
+EXPORT_SYMBOL_GPL(__fpga_bridge_register);
/**
* fpga_bridge_unregister - unregister an FPGA bridge
@@ -396,7 +406,7 @@ void fpga_bridge_unregister(struct fpga_bridge *bridge)
* If the low level driver provides a method for putting bridge into
* a desired state upon unregister, do it.
*/
- if (bridge->br_ops && bridge->br_ops->fpga_bridge_remove)
+ if (bridge->br_ops->fpga_bridge_remove)
bridge->br_ops->fpga_bridge_remove(bridge);
device_unregister(&bridge->dev);
@@ -411,21 +421,20 @@ static void fpga_bridge_dev_release(struct device *dev)
kfree(bridge);
}
+static const struct class fpga_bridge_class = {
+ .name = "fpga_bridge",
+ .dev_groups = fpga_bridge_groups,
+ .dev_release = fpga_bridge_dev_release,
+};
+
static int __init fpga_bridge_dev_init(void)
{
- fpga_bridge_class = class_create(THIS_MODULE, "fpga_bridge");
- if (IS_ERR(fpga_bridge_class))
- return PTR_ERR(fpga_bridge_class);
-
- fpga_bridge_class->dev_groups = fpga_bridge_groups;
- fpga_bridge_class->dev_release = fpga_bridge_dev_release;
-
- return 0;
+ return class_register(&fpga_bridge_class);
}
static void __exit fpga_bridge_dev_exit(void)
{
- class_destroy(fpga_bridge_class);
+ class_unregister(&fpga_bridge_class);
ida_destroy(&fpga_bridge_ida);
}
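Low-level bridge drivers now pass the module that owns br_ops, and the same owner-tracking pattern is applied to the manager and region registration below. A minimal sketch against the new __fpga_bridge_register() signature (the driver names and platform_device wiring are illustrative; in-tree callers are expected to go through a wrapper that supplies THIS_MODULE):

#include <linux/err.h>
#include <linux/fpga/fpga-bridge.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int example_br_enable_set(struct fpga_bridge *bridge, bool enable)
{
	/* ... gate or ungate the bus to the reprogrammable region ... */
	return 0;
}

static const struct fpga_bridge_ops example_br_ops = {
	.enable_set = example_br_enable_set,
};

static int example_bridge_probe(struct platform_device *pdev)
{
	struct fpga_bridge *br;

	/* The owner module stays pinned while someone holds the bridge. */
	br = __fpga_bridge_register(&pdev->dev, "example bridge",
				    &example_br_ops, NULL, THIS_MODULE);
	if (IS_ERR(br))
		return PTR_ERR(br);

	platform_set_drvdata(pdev, br);
	return 0;
}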
diff --git a/drivers/fpga/fpga-mgr.c b/drivers/fpga/fpga-mgr.c
index 8efa67620e21..0f4035b089a2 100644
--- a/drivers/fpga/fpga-mgr.c
+++ b/drivers/fpga/fpga-mgr.c
@@ -19,7 +19,7 @@
#include <linux/highmem.h>
static DEFINE_IDA(fpga_mgr_ida);
-static struct class *fpga_mgr_class;
+static const struct class fpga_mgr_class;
struct fpga_mgr_devres {
struct fpga_manager *mgr;
@@ -664,20 +664,16 @@ static struct attribute *fpga_mgr_attrs[] = {
};
ATTRIBUTE_GROUPS(fpga_mgr);
-static struct fpga_manager *__fpga_mgr_get(struct device *dev)
+static struct fpga_manager *__fpga_mgr_get(struct device *mgr_dev)
{
struct fpga_manager *mgr;
- mgr = to_fpga_manager(dev);
+ mgr = to_fpga_manager(mgr_dev);
- if (!try_module_get(dev->parent->driver->owner))
- goto err_dev;
+ if (!try_module_get(mgr->mops_owner))
+ mgr = ERR_PTR(-ENODEV);
return mgr;
-
-err_dev:
- put_device(dev);
- return ERR_PTR(-ENODEV);
}
static int fpga_mgr_dev_match(struct device *dev, const void *data)
@@ -693,12 +689,18 @@ static int fpga_mgr_dev_match(struct device *dev, const void *data)
*/
struct fpga_manager *fpga_mgr_get(struct device *dev)
{
- struct device *mgr_dev = class_find_device(fpga_mgr_class, NULL, dev,
- fpga_mgr_dev_match);
+ struct fpga_manager *mgr;
+ struct device *mgr_dev;
+
+ mgr_dev = class_find_device(&fpga_mgr_class, NULL, dev, fpga_mgr_dev_match);
if (!mgr_dev)
return ERR_PTR(-ENODEV);
- return __fpga_mgr_get(mgr_dev);
+ mgr = __fpga_mgr_get(mgr_dev);
+ if (IS_ERR(mgr))
+ put_device(mgr_dev);
+
+ return mgr;
}
EXPORT_SYMBOL_GPL(fpga_mgr_get);
@@ -711,13 +713,18 @@ EXPORT_SYMBOL_GPL(fpga_mgr_get);
*/
struct fpga_manager *of_fpga_mgr_get(struct device_node *node)
{
- struct device *dev;
+ struct fpga_manager *mgr;
+ struct device *mgr_dev;
- dev = class_find_device_by_of_node(fpga_mgr_class, node);
- if (!dev)
+ mgr_dev = class_find_device_by_of_node(&fpga_mgr_class, node);
+ if (!mgr_dev)
return ERR_PTR(-ENODEV);
- return __fpga_mgr_get(dev);
+ mgr = __fpga_mgr_get(mgr_dev);
+ if (IS_ERR(mgr))
+ put_device(mgr_dev);
+
+ return mgr;
}
EXPORT_SYMBOL_GPL(of_fpga_mgr_get);
@@ -727,7 +734,7 @@ EXPORT_SYMBOL_GPL(of_fpga_mgr_get);
*/
void fpga_mgr_put(struct fpga_manager *mgr)
{
- module_put(mgr->dev.parent->driver->owner);
+ module_put(mgr->mops_owner);
put_device(&mgr->dev);
}
EXPORT_SYMBOL_GPL(fpga_mgr_put);
@@ -766,9 +773,10 @@ void fpga_mgr_unlock(struct fpga_manager *mgr)
EXPORT_SYMBOL_GPL(fpga_mgr_unlock);
/**
- * fpga_mgr_register_full - create and register an FPGA Manager device
+ * __fpga_mgr_register_full - create and register an FPGA Manager device
* @parent: fpga manager device from pdev
* @info: parameters for fpga manager
+ * @owner: owner module containing the ops
*
* The caller of this function is responsible for calling fpga_mgr_unregister().
* Using devm_fpga_mgr_register_full() instead is recommended.
@@ -776,7 +784,8 @@ EXPORT_SYMBOL_GPL(fpga_mgr_unlock);
* Return: pointer to struct fpga_manager pointer or ERR_PTR()
*/
struct fpga_manager *
-fpga_mgr_register_full(struct device *parent, const struct fpga_manager_info *info)
+__fpga_mgr_register_full(struct device *parent, const struct fpga_manager_info *info,
+ struct module *owner)
{
const struct fpga_manager_ops *mops = info->mops;
struct fpga_manager *mgr;
@@ -804,12 +813,14 @@ fpga_mgr_register_full(struct device *parent, const struct fpga_manager_info *in
mutex_init(&mgr->ref_mutex);
+ mgr->mops_owner = owner;
+
mgr->name = info->name;
mgr->mops = info->mops;
mgr->priv = info->priv;
mgr->compat_id = info->compat_id;
- mgr->dev.class = fpga_mgr_class;
+ mgr->dev.class = &fpga_mgr_class;
mgr->dev.groups = mops->groups;
mgr->dev.parent = parent;
mgr->dev.of_node = parent->of_node;
@@ -841,14 +852,15 @@ error_kfree:
return ERR_PTR(ret);
}
-EXPORT_SYMBOL_GPL(fpga_mgr_register_full);
+EXPORT_SYMBOL_GPL(__fpga_mgr_register_full);
/**
- * fpga_mgr_register - create and register an FPGA Manager device
+ * __fpga_mgr_register - create and register an FPGA Manager device
* @parent: fpga manager device from pdev
* @name: fpga manager name
* @mops: pointer to structure of fpga manager ops
* @priv: fpga manager private data
+ * @owner: owner module containing the ops
*
* The caller of this function is responsible for calling fpga_mgr_unregister().
* Using devm_fpga_mgr_register() instead is recommended. This simple
@@ -859,8 +871,8 @@ EXPORT_SYMBOL_GPL(fpga_mgr_register_full);
* Return: pointer to struct fpga_manager pointer or ERR_PTR()
*/
struct fpga_manager *
-fpga_mgr_register(struct device *parent, const char *name,
- const struct fpga_manager_ops *mops, void *priv)
+__fpga_mgr_register(struct device *parent, const char *name,
+ const struct fpga_manager_ops *mops, void *priv, struct module *owner)
{
struct fpga_manager_info info = { 0 };
@@ -868,9 +880,9 @@ fpga_mgr_register(struct device *parent, const char *name,
info.mops = mops;
info.priv = priv;
- return fpga_mgr_register_full(parent, &info);
+ return __fpga_mgr_register_full(parent, &info, owner);
}
-EXPORT_SYMBOL_GPL(fpga_mgr_register);
+EXPORT_SYMBOL_GPL(__fpga_mgr_register);
/**
* fpga_mgr_unregister - unregister an FPGA manager
@@ -900,9 +912,10 @@ static void devm_fpga_mgr_unregister(struct device *dev, void *res)
}
/**
- * devm_fpga_mgr_register_full - resource managed variant of fpga_mgr_register()
+ * __devm_fpga_mgr_register_full - resource managed variant of fpga_mgr_register_full()
* @parent: fpga manager device from pdev
* @info: parameters for fpga manager
+ * @owner: owner module containing the ops
*
* Return: fpga manager pointer on success, negative error code otherwise.
*
@@ -910,7 +923,8 @@ static void devm_fpga_mgr_unregister(struct device *dev, void *res)
* function will be called automatically when the managing device is detached.
*/
struct fpga_manager *
-devm_fpga_mgr_register_full(struct device *parent, const struct fpga_manager_info *info)
+__devm_fpga_mgr_register_full(struct device *parent, const struct fpga_manager_info *info,
+ struct module *owner)
{
struct fpga_mgr_devres *dr;
struct fpga_manager *mgr;
@@ -919,7 +933,7 @@ devm_fpga_mgr_register_full(struct device *parent, const struct fpga_manager_inf
if (!dr)
return ERR_PTR(-ENOMEM);
- mgr = fpga_mgr_register_full(parent, info);
+ mgr = __fpga_mgr_register_full(parent, info, owner);
if (IS_ERR(mgr)) {
devres_free(dr);
return mgr;
@@ -930,14 +944,15 @@ devm_fpga_mgr_register_full(struct device *parent, const struct fpga_manager_inf
return mgr;
}
-EXPORT_SYMBOL_GPL(devm_fpga_mgr_register_full);
+EXPORT_SYMBOL_GPL(__devm_fpga_mgr_register_full);
/**
- * devm_fpga_mgr_register - resource managed variant of fpga_mgr_register()
+ * __devm_fpga_mgr_register - resource managed variant of fpga_mgr_register()
* @parent: fpga manager device from pdev
* @name: fpga manager name
* @mops: pointer to structure of fpga manager ops
* @priv: fpga manager private data
+ * @owner: owner module containing the ops
*
* Return: fpga manager pointer on success, negative error code otherwise.
*
@@ -946,8 +961,9 @@ EXPORT_SYMBOL_GPL(devm_fpga_mgr_register_full);
* device is detached.
*/
struct fpga_manager *
-devm_fpga_mgr_register(struct device *parent, const char *name,
- const struct fpga_manager_ops *mops, void *priv)
+__devm_fpga_mgr_register(struct device *parent, const char *name,
+ const struct fpga_manager_ops *mops, void *priv,
+ struct module *owner)
{
struct fpga_manager_info info = { 0 };
@@ -955,9 +971,9 @@ devm_fpga_mgr_register(struct device *parent, const char *name,
info.mops = mops;
info.priv = priv;
- return devm_fpga_mgr_register_full(parent, &info);
+ return __devm_fpga_mgr_register_full(parent, &info, owner);
}
-EXPORT_SYMBOL_GPL(devm_fpga_mgr_register);
+EXPORT_SYMBOL_GPL(__devm_fpga_mgr_register);
static void fpga_mgr_dev_release(struct device *dev)
{
@@ -967,23 +983,22 @@ static void fpga_mgr_dev_release(struct device *dev)
kfree(mgr);
}
+static const struct class fpga_mgr_class = {
+ .name = "fpga_manager",
+ .dev_groups = fpga_mgr_groups,
+ .dev_release = fpga_mgr_dev_release,
+};
+
static int __init fpga_mgr_class_init(void)
{
pr_info("FPGA manager framework\n");
- fpga_mgr_class = class_create(THIS_MODULE, "fpga_manager");
- if (IS_ERR(fpga_mgr_class))
- return PTR_ERR(fpga_mgr_class);
-
- fpga_mgr_class->dev_groups = fpga_mgr_groups;
- fpga_mgr_class->dev_release = fpga_mgr_dev_release;
-
- return 0;
+ return class_register(&fpga_mgr_class);
}
static void __exit fpga_mgr_class_exit(void)
{
- class_destroy(fpga_mgr_class);
+ class_unregister(&fpga_mgr_class);
ida_destroy(&fpga_mgr_ida);
}
diff --git a/drivers/fpga/fpga-region.c b/drivers/fpga/fpga-region.c
index 27ff9dea04ae..753cd142503e 100644
--- a/drivers/fpga/fpga-region.c
+++ b/drivers/fpga/fpga-region.c
@@ -16,7 +16,7 @@
#include <linux/spinlock.h>
static DEFINE_IDA(fpga_region_ida);
-static struct class *fpga_region_class;
+static const struct class fpga_region_class;
struct fpga_region *
fpga_region_class_find(struct device *start, const void *data,
@@ -24,7 +24,7 @@ fpga_region_class_find(struct device *start, const void *data,
{
struct device *dev;
- dev = class_find_device(fpga_region_class, start, data, match);
+ dev = class_find_device(&fpga_region_class, start, data, match);
if (!dev)
return NULL;
@@ -38,9 +38,10 @@ EXPORT_SYMBOL_GPL(fpga_region_class_find);
*
* Caller should call fpga_region_put() when done with region.
*
- * Return fpga_region struct if successful.
- * Return -EBUSY if someone already has a reference to the region.
- * Return -ENODEV if @np is not an FPGA Region.
+ * Return:
+ * * fpga_region struct if successful.
+ * * -EBUSY if someone already has a reference to the region.
+ * * -ENODEV if the parent driver module refcount can't be taken.
*/
static struct fpga_region *fpga_region_get(struct fpga_region *region)
{
@@ -52,7 +53,7 @@ static struct fpga_region *fpga_region_get(struct fpga_region *region)
}
get_device(dev);
- if (!try_module_get(dev->parent->driver->owner)) {
+ if (!try_module_get(region->ops_owner)) {
put_device(dev);
mutex_unlock(&region->mutex);
return ERR_PTR(-ENODEV);
@@ -74,7 +75,7 @@ static void fpga_region_put(struct fpga_region *region)
dev_dbg(dev, "put\n");
- module_put(dev->parent->driver->owner);
+ module_put(region->ops_owner);
put_device(dev);
mutex_unlock(&region->mutex);
}
@@ -91,7 +92,7 @@ static void fpga_region_put(struct fpga_region *region)
* The caller will need to call fpga_bridges_put() before attempting to
* reprogram the region.
*
- * Return 0 for success or negative error code.
+ * Return: 0 for success or negative error code.
*/
int fpga_region_program_fpga(struct fpga_region *region)
{
@@ -180,14 +181,16 @@ static struct attribute *fpga_region_attrs[] = {
ATTRIBUTE_GROUPS(fpga_region);
/**
- * fpga_region_register_full - create and register an FPGA Region device
+ * __fpga_region_register_full - create and register an FPGA Region device
* @parent: device parent
* @info: parameters for FPGA Region
+ * @owner: module containing the get_bridges function
*
* Return: struct fpga_region or ERR_PTR()
*/
struct fpga_region *
-fpga_region_register_full(struct device *parent, const struct fpga_region_info *info)
+__fpga_region_register_full(struct device *parent, const struct fpga_region_info *info,
+ struct module *owner)
{
struct fpga_region *region;
int id, ret = 0;
@@ -212,11 +215,12 @@ fpga_region_register_full(struct device *parent, const struct fpga_region_info *
region->compat_id = info->compat_id;
region->priv = info->priv;
region->get_bridges = info->get_bridges;
+ region->ops_owner = owner;
mutex_init(&region->mutex);
INIT_LIST_HEAD(&region->bridge_list);
- region->dev.class = fpga_region_class;
+ region->dev.class = &fpga_region_class;
region->dev.parent = parent;
region->dev.of_node = parent->of_node;
region->dev.id = id;
@@ -240,13 +244,14 @@ err_free:
return ERR_PTR(ret);
}
-EXPORT_SYMBOL_GPL(fpga_region_register_full);
+EXPORT_SYMBOL_GPL(__fpga_region_register_full);
/**
- * fpga_region_register - create and register an FPGA Region device
+ * __fpga_region_register - create and register an FPGA Region device
* @parent: device parent
* @mgr: manager that programs this region
* @get_bridges: optional function to get bridges to a list
+ * @owner: module containing the get_bridges function
*
* This simple version of the register function should be sufficient for most users.
* The fpga_region_register_full() function is available for users that need to
@@ -255,17 +260,17 @@ EXPORT_SYMBOL_GPL(fpga_region_register_full);
* Return: struct fpga_region or ERR_PTR()
*/
struct fpga_region *
-fpga_region_register(struct device *parent, struct fpga_manager *mgr,
- int (*get_bridges)(struct fpga_region *))
+__fpga_region_register(struct device *parent, struct fpga_manager *mgr,
+ int (*get_bridges)(struct fpga_region *), struct module *owner)
{
struct fpga_region_info info = { 0 };
info.mgr = mgr;
info.get_bridges = get_bridges;
- return fpga_region_register_full(parent, &info);
+ return __fpga_region_register_full(parent, &info, owner);
}
-EXPORT_SYMBOL_GPL(fpga_region_register);
+EXPORT_SYMBOL_GPL(__fpga_region_register);
/**
* fpga_region_unregister - unregister an FPGA region
@@ -287,25 +292,25 @@ static void fpga_region_dev_release(struct device *dev)
kfree(region);
}
+static const struct class fpga_region_class = {
+ .name = "fpga_region",
+ .dev_groups = fpga_region_groups,
+ .dev_release = fpga_region_dev_release,
+};
+
/**
- * fpga_region_init - init function for fpga_region class
- * Creates the fpga_region class and registers a reconfig notifier.
+ * fpga_region_init - creates the fpga_region class.
+ *
+ * Return: 0 on success or a negative error code on failure.
*/
static int __init fpga_region_init(void)
{
- fpga_region_class = class_create(THIS_MODULE, "fpga_region");
- if (IS_ERR(fpga_region_class))
- return PTR_ERR(fpga_region_class);
-
- fpga_region_class->dev_groups = fpga_region_groups;
- fpga_region_class->dev_release = fpga_region_dev_release;
-
- return 0;
+ return class_register(&fpga_region_class);
}
static void __exit fpga_region_exit(void)
{
- class_destroy(fpga_region_class);
+ class_unregister(&fpga_region_class);
ida_destroy(&fpga_region_ida);
}
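
Both the fpga-mgr.c and fpga-region.c hunks above convert a dynamically created class (class_create()) into a statically allocated const struct class registered at init time. A minimal sketch of that idiom in isolation, using illustrative example_* names rather than the drivers' own symbols:

#include <linux/device/class.h>
#include <linux/init.h>
#include <linux/module.h>

static void example_dev_release(struct device *dev)
{
	/* free per-device data owned by the class device here */
}

static const struct class example_class = {
	.name        = "example",
	.dev_release = example_dev_release,
};

static int __init example_init(void)
{
	/* class_register() takes the const, statically allocated class */
	return class_register(&example_class);
}

static void __exit example_exit(void)
{
	class_unregister(&example_class);
}

module_init(example_init);
module_exit(example_exit);
MODULE_DESCRIPTION("Sketch of the statically allocated struct class idiom");
MODULE_LICENSE("GPL");
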
diff --git a/drivers/fpga/ice40-spi.c b/drivers/fpga/ice40-spi.c
index 7cbb3558b844..62c30266130d 100644
--- a/drivers/fpga/ice40-spi.c
+++ b/drivers/fpga/ice40-spi.c
@@ -10,8 +10,8 @@
#include <linux/fpga/fpga-mgr.h>
#include <linux/gpio/consumer.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_gpio.h>
#include <linux/spi/spi.h>
#include <linux/stringify.h>
@@ -66,7 +66,7 @@ static int ice40_fpga_ops_write_init(struct fpga_manager *mgr,
}
/* Lock the bus, assert CRESET_B and SS_B and delay >200ns */
- spi_bus_lock(dev->master);
+ spi_bus_lock(dev->controller);
gpiod_set_value(priv->reset, 1);
@@ -94,7 +94,7 @@ static int ice40_fpga_ops_write_init(struct fpga_manager *mgr,
ret = spi_sync_locked(dev, &message);
fail:
- spi_bus_unlock(dev->master);
+ spi_bus_unlock(dev->controller);
return ret;
}
@@ -199,7 +199,7 @@ static struct spi_driver ice40_fpga_driver = {
.probe = ice40_fpga_probe,
.driver = {
.name = "ice40spi",
- .of_match_table = of_match_ptr(ice40_fpga_of_match),
+ .of_match_table = ice40_fpga_of_match,
},
.id_table = ice40_fpga_spi_ids,
};
diff --git a/drivers/fpga/intel-m10-bmc-sec-update.c b/drivers/fpga/intel-m10-bmc-sec-update.c
index 79d48852825e..10f678b9ed36 100644
--- a/drivers/fpga/intel-m10-bmc-sec-update.c
+++ b/drivers/fpga/intel-m10-bmc-sec-update.c
@@ -14,6 +14,12 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
+struct m10bmc_sec;
+
+struct m10bmc_sec_ops {
+ int (*rsu_status)(struct m10bmc_sec *sec);
+};
+
struct m10bmc_sec {
struct device *dev;
struct intel_m10bmc *m10bmc;
@@ -21,6 +27,7 @@ struct m10bmc_sec {
char *fw_name;
u32 fw_name_id;
bool cancel_request;
+ const struct m10bmc_sec_ops *ops;
};
static DEFINE_XARRAY_ALLOC(fw_upload_xa);
@@ -31,6 +38,71 @@ static DEFINE_XARRAY_ALLOC(fw_upload_xa);
#define REH_MAGIC GENMASK(15, 0)
#define REH_SHA_NUM_BYTES GENMASK(31, 16)
+static int m10bmc_sec_write(struct m10bmc_sec *sec, const u8 *buf, u32 offset, u32 size)
+{
+ struct intel_m10bmc *m10bmc = sec->m10bmc;
+ unsigned int stride = regmap_get_reg_stride(m10bmc->regmap);
+ u32 write_count = size / stride;
+ u32 leftover_offset = write_count * stride;
+ u32 leftover_size = size - leftover_offset;
+ u32 leftover_tmp = 0;
+ int ret;
+
+ if (sec->m10bmc->flash_bulk_ops)
+ return sec->m10bmc->flash_bulk_ops->write(m10bmc, buf, offset, size);
+
+ if (WARN_ON_ONCE(stride > sizeof(leftover_tmp)))
+ return -EINVAL;
+
+ ret = regmap_bulk_write(m10bmc->regmap, M10BMC_STAGING_BASE + offset,
+ buf + offset, write_count);
+ if (ret)
+ return ret;
+
+ /* If size is not aligned to stride, handle the remainder bytes with regmap_write() */
+ if (leftover_size) {
+ memcpy(&leftover_tmp, buf + leftover_offset, leftover_size);
+ ret = regmap_write(m10bmc->regmap, M10BMC_STAGING_BASE + offset + leftover_offset,
+ leftover_tmp);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int m10bmc_sec_read(struct m10bmc_sec *sec, u8 *buf, u32 addr, u32 size)
+{
+ struct intel_m10bmc *m10bmc = sec->m10bmc;
+ unsigned int stride = regmap_get_reg_stride(m10bmc->regmap);
+ u32 read_count = size / stride;
+ u32 leftover_offset = read_count * stride;
+ u32 leftover_size = size - leftover_offset;
+ u32 leftover_tmp;
+ int ret;
+
+ if (sec->m10bmc->flash_bulk_ops)
+ return sec->m10bmc->flash_bulk_ops->read(m10bmc, buf, addr, size);
+
+ if (WARN_ON_ONCE(stride > sizeof(leftover_tmp)))
+ return -EINVAL;
+
+ ret = regmap_bulk_read(m10bmc->regmap, addr, buf, read_count);
+ if (ret)
+ return ret;
+
+ /* If size is not aligned to stride, handle the remainder bytes with regmap_read() */
+ if (leftover_size) {
+ ret = regmap_read(m10bmc->regmap, addr + leftover_offset, &leftover_tmp);
+ if (ret)
+ return ret;
+ memcpy(buf + leftover_offset, &leftover_tmp, leftover_size);
+ }
+
+ return 0;
+}
+
static ssize_t
show_root_entry_hash(struct device *dev, u32 exp_magic,
u32 prog_addr, u32 reh_addr, char *buf)
@@ -38,11 +110,9 @@ show_root_entry_hash(struct device *dev, u32 exp_magic,
struct m10bmc_sec *sec = dev_get_drvdata(dev);
int sha_num_bytes, i, ret, cnt = 0;
u8 hash[REH_SHA384_SIZE];
- unsigned int stride;
u32 magic;
- stride = regmap_get_reg_stride(sec->m10bmc->regmap);
- ret = m10bmc_raw_read(sec->m10bmc, prog_addr, &magic);
+ ret = m10bmc_sec_read(sec, (u8 *)&magic, prog_addr, sizeof(magic));
if (ret)
return ret;
@@ -50,19 +120,16 @@ show_root_entry_hash(struct device *dev, u32 exp_magic,
return sysfs_emit(buf, "hash not programmed\n");
sha_num_bytes = FIELD_GET(REH_SHA_NUM_BYTES, magic) / 8;
- if ((sha_num_bytes % stride) ||
- (sha_num_bytes != REH_SHA256_SIZE &&
- sha_num_bytes != REH_SHA384_SIZE)) {
+ if (sha_num_bytes != REH_SHA256_SIZE &&
+ sha_num_bytes != REH_SHA384_SIZE) {
dev_err(sec->dev, "%s bad sha num bytes %d\n", __func__,
sha_num_bytes);
return -EINVAL;
}
- ret = regmap_bulk_read(sec->m10bmc->regmap, reh_addr,
- hash, sha_num_bytes / stride);
+ ret = m10bmc_sec_read(sec, hash, reh_addr, sha_num_bytes);
if (ret) {
- dev_err(dev, "failed to read root entry hash: %x cnt %x: %d\n",
- reh_addr, sha_num_bytes / stride, ret);
+ dev_err(dev, "failed to read root entry hash\n");
return ret;
}
@@ -73,16 +140,24 @@ show_root_entry_hash(struct device *dev, u32 exp_magic,
return cnt;
}
-#define DEVICE_ATTR_SEC_REH_RO(_name, _magic, _prog_addr, _reh_addr) \
+#define DEVICE_ATTR_SEC_REH_RO(_name) \
static ssize_t _name##_root_entry_hash_show(struct device *dev, \
struct device_attribute *attr, \
char *buf) \
-{ return show_root_entry_hash(dev, _magic, _prog_addr, _reh_addr, buf); } \
+{ \
+ struct m10bmc_sec *sec = dev_get_drvdata(dev); \
+ const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map; \
+ \
+ return show_root_entry_hash(dev, csr_map->_name##_magic, \
+ csr_map->_name##_prog_addr, \
+ csr_map->_name##_reh_addr, \
+ buf); \
+} \
static DEVICE_ATTR_RO(_name##_root_entry_hash)
-DEVICE_ATTR_SEC_REH_RO(bmc, BMC_PROG_MAGIC, BMC_PROG_ADDR, BMC_REH_ADDR);
-DEVICE_ATTR_SEC_REH_RO(sr, SR_PROG_MAGIC, SR_PROG_ADDR, SR_REH_ADDR);
-DEVICE_ATTR_SEC_REH_RO(pr, PR_PROG_MAGIC, PR_PROG_ADDR, PR_REH_ADDR);
+DEVICE_ATTR_SEC_REH_RO(bmc);
+DEVICE_ATTR_SEC_REH_RO(sr);
+DEVICE_ATTR_SEC_REH_RO(pr);
#define CSK_BIT_LEN 128U
#define CSK_32ARRAY_SIZE DIV_ROUND_UP(CSK_BIT_LEN, 32)
@@ -90,27 +165,16 @@ DEVICE_ATTR_SEC_REH_RO(pr, PR_PROG_MAGIC, PR_PROG_ADDR, PR_REH_ADDR);
static ssize_t
show_canceled_csk(struct device *dev, u32 addr, char *buf)
{
- unsigned int i, stride, size = CSK_32ARRAY_SIZE * sizeof(u32);
+ unsigned int i, size = CSK_32ARRAY_SIZE * sizeof(u32);
struct m10bmc_sec *sec = dev_get_drvdata(dev);
DECLARE_BITMAP(csk_map, CSK_BIT_LEN);
__le32 csk_le32[CSK_32ARRAY_SIZE];
u32 csk32[CSK_32ARRAY_SIZE];
int ret;
- stride = regmap_get_reg_stride(sec->m10bmc->regmap);
- if (size % stride) {
- dev_err(sec->dev,
- "CSK vector size (0x%x) not aligned to stride (0x%x)\n",
- size, stride);
- WARN_ON_ONCE(1);
- return -EINVAL;
- }
-
- ret = regmap_bulk_read(sec->m10bmc->regmap, addr, csk_le32,
- size / stride);
+ ret = m10bmc_sec_read(sec, (u8 *)&csk_le32, addr, size);
if (ret) {
- dev_err(sec->dev, "failed to read CSK vector: %x cnt %x: %d\n",
- addr, size / stride, ret);
+ dev_err(sec->dev, "failed to read CSK vector\n");
return ret;
}
@@ -122,18 +186,25 @@ show_canceled_csk(struct device *dev, u32 addr, char *buf)
return bitmap_print_to_pagebuf(1, buf, csk_map, CSK_BIT_LEN);
}
-#define DEVICE_ATTR_SEC_CSK_RO(_name, _addr) \
+#define DEVICE_ATTR_SEC_CSK_RO(_name) \
static ssize_t _name##_canceled_csks_show(struct device *dev, \
struct device_attribute *attr, \
char *buf) \
-{ return show_canceled_csk(dev, _addr, buf); } \
+{ \
+ struct m10bmc_sec *sec = dev_get_drvdata(dev); \
+ const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map; \
+ \
+ return show_canceled_csk(dev, \
+ csr_map->_name##_prog_addr + CSK_VEC_OFFSET, \
+ buf); \
+} \
static DEVICE_ATTR_RO(_name##_canceled_csks)
#define CSK_VEC_OFFSET 0x34
-DEVICE_ATTR_SEC_CSK_RO(bmc, BMC_PROG_ADDR + CSK_VEC_OFFSET);
-DEVICE_ATTR_SEC_CSK_RO(sr, SR_PROG_ADDR + CSK_VEC_OFFSET);
-DEVICE_ATTR_SEC_CSK_RO(pr, PR_PROG_ADDR + CSK_VEC_OFFSET);
+DEVICE_ATTR_SEC_CSK_RO(bmc);
+DEVICE_ATTR_SEC_CSK_RO(sr);
+DEVICE_ATTR_SEC_CSK_RO(pr);
#define FLASH_COUNT_SIZE 4096 /* count stored as inverted bit vector */
@@ -141,31 +212,21 @@ static ssize_t flash_count_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct m10bmc_sec *sec = dev_get_drvdata(dev);
- unsigned int stride, num_bits;
+ const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
+ unsigned int num_bits;
u8 *flash_buf;
int cnt, ret;
- stride = regmap_get_reg_stride(sec->m10bmc->regmap);
num_bits = FLASH_COUNT_SIZE * 8;
- if (FLASH_COUNT_SIZE % stride) {
- dev_err(sec->dev,
- "FLASH_COUNT_SIZE (0x%x) not aligned to stride (0x%x)\n",
- FLASH_COUNT_SIZE, stride);
- WARN_ON_ONCE(1);
- return -EINVAL;
- }
-
flash_buf = kmalloc(FLASH_COUNT_SIZE, GFP_KERNEL);
if (!flash_buf)
return -ENOMEM;
- ret = regmap_bulk_read(sec->m10bmc->regmap, STAGING_FLASH_COUNT,
- flash_buf, FLASH_COUNT_SIZE / stride);
+ ret = m10bmc_sec_read(sec, flash_buf, csr_map->rsu_update_counter,
+ FLASH_COUNT_SIZE);
if (ret) {
- dev_err(sec->dev,
- "failed to read flash count: %x cnt %x: %d\n",
- STAGING_FLASH_COUNT, FLASH_COUNT_SIZE / stride, ret);
+ dev_err(sec->dev, "failed to read flash count\n");
goto exit_free;
}
cnt = num_bits - bitmap_weight((unsigned long *)flash_buf, num_bits);
@@ -200,25 +261,94 @@ static const struct attribute_group *m10bmc_sec_attr_groups[] = {
static void log_error_regs(struct m10bmc_sec *sec, u32 doorbell)
{
+ const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
u32 auth_result;
- dev_err(sec->dev, "RSU error status: 0x%08x\n", doorbell);
+ dev_err(sec->dev, "Doorbell: 0x%08x\n", doorbell);
- if (!m10bmc_sys_read(sec->m10bmc, M10BMC_AUTH_RESULT, &auth_result))
+ if (!m10bmc_sys_read(sec->m10bmc, csr_map->auth_result, &auth_result))
dev_err(sec->dev, "RSU auth result: 0x%08x\n", auth_result);
}
+static int m10bmc_sec_n3000_rsu_status(struct m10bmc_sec *sec)
+{
+ const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
+ u32 doorbell;
+ int ret;
+
+ ret = m10bmc_sys_read(sec->m10bmc, csr_map->doorbell, &doorbell);
+ if (ret)
+ return ret;
+
+ return FIELD_GET(DRBL_RSU_STATUS, doorbell);
+}
+
+static int m10bmc_sec_n6000_rsu_status(struct m10bmc_sec *sec)
+{
+ const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
+ u32 auth_result;
+ int ret;
+
+ ret = m10bmc_sys_read(sec->m10bmc, csr_map->auth_result, &auth_result);
+ if (ret)
+ return ret;
+
+ return FIELD_GET(AUTH_RESULT_RSU_STATUS, auth_result);
+}
+
+static bool rsu_status_ok(u32 status)
+{
+ return (status == RSU_STAT_NORMAL ||
+ status == RSU_STAT_NIOS_OK ||
+ status == RSU_STAT_USER_OK ||
+ status == RSU_STAT_FACTORY_OK);
+}
+
+static bool rsu_progress_done(u32 progress)
+{
+ return (progress == RSU_PROG_IDLE ||
+ progress == RSU_PROG_RSU_DONE);
+}
+
+static bool rsu_progress_busy(u32 progress)
+{
+ return (progress == RSU_PROG_AUTHENTICATING ||
+ progress == RSU_PROG_COPYING ||
+ progress == RSU_PROG_UPDATE_CANCEL ||
+ progress == RSU_PROG_PROGRAM_KEY_HASH);
+}
+
+static int m10bmc_sec_progress_status(struct m10bmc_sec *sec, u32 *doorbell_reg,
+ u32 *progress, u32 *status)
+{
+ const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
+ int ret;
+
+ ret = m10bmc_sys_read(sec->m10bmc, csr_map->doorbell, doorbell_reg);
+ if (ret)
+ return ret;
+
+ ret = sec->ops->rsu_status(sec);
+ if (ret < 0)
+ return ret;
+
+ *status = ret;
+ *progress = rsu_prog(*doorbell_reg);
+
+ return 0;
+}
+
static enum fw_upload_err rsu_check_idle(struct m10bmc_sec *sec)
{
+ const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
u32 doorbell;
int ret;
- ret = m10bmc_sys_read(sec->m10bmc, M10BMC_DOORBELL, &doorbell);
+ ret = m10bmc_sys_read(sec->m10bmc, csr_map->doorbell, &doorbell);
if (ret)
return FW_UPLOAD_ERR_RW_ERROR;
- if (rsu_prog(doorbell) != RSU_PROG_IDLE &&
- rsu_prog(doorbell) != RSU_PROG_RSU_DONE) {
+ if (!rsu_progress_done(rsu_prog(doorbell))) {
log_error_regs(sec, doorbell);
return FW_UPLOAD_ERR_BUSY;
}
@@ -226,19 +356,15 @@ static enum fw_upload_err rsu_check_idle(struct m10bmc_sec *sec)
return FW_UPLOAD_ERR_NONE;
}
-static inline bool rsu_start_done(u32 doorbell)
+static inline bool rsu_start_done(u32 doorbell_reg, u32 progress, u32 status)
{
- u32 status, progress;
-
- if (doorbell & DRBL_RSU_REQUEST)
+ if (doorbell_reg & DRBL_RSU_REQUEST)
return false;
- status = rsu_stat(doorbell);
if (status == RSU_STAT_ERASE_FAIL || status == RSU_STAT_WEAROUT)
return true;
- progress = rsu_prog(doorbell);
- if (progress != RSU_PROG_IDLE && progress != RSU_PROG_RSU_DONE)
+ if (!rsu_progress_done(progress))
return true;
return false;
@@ -246,38 +372,37 @@ static inline bool rsu_start_done(u32 doorbell)
static enum fw_upload_err rsu_update_init(struct m10bmc_sec *sec)
{
- u32 doorbell, status;
- int ret;
-
- ret = regmap_update_bits(sec->m10bmc->regmap,
- M10BMC_SYS_BASE + M10BMC_DOORBELL,
- DRBL_RSU_REQUEST | DRBL_HOST_STATUS,
- DRBL_RSU_REQUEST |
- FIELD_PREP(DRBL_HOST_STATUS,
- HOST_STATUS_IDLE));
+ const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
+ u32 doorbell_reg, progress, status;
+ int ret, err;
+
+ ret = m10bmc_sys_update_bits(sec->m10bmc, csr_map->doorbell,
+ DRBL_RSU_REQUEST | DRBL_HOST_STATUS,
+ DRBL_RSU_REQUEST |
+ FIELD_PREP(DRBL_HOST_STATUS,
+ HOST_STATUS_IDLE));
if (ret)
return FW_UPLOAD_ERR_RW_ERROR;
- ret = regmap_read_poll_timeout(sec->m10bmc->regmap,
- M10BMC_SYS_BASE + M10BMC_DOORBELL,
- doorbell,
- rsu_start_done(doorbell),
- NIOS_HANDSHAKE_INTERVAL_US,
- NIOS_HANDSHAKE_TIMEOUT_US);
+ ret = read_poll_timeout(m10bmc_sec_progress_status, err,
+ err < 0 || rsu_start_done(doorbell_reg, progress, status),
+ NIOS_HANDSHAKE_INTERVAL_US,
+ NIOS_HANDSHAKE_TIMEOUT_US,
+ false,
+ sec, &doorbell_reg, &progress, &status);
if (ret == -ETIMEDOUT) {
- log_error_regs(sec, doorbell);
+ log_error_regs(sec, doorbell_reg);
return FW_UPLOAD_ERR_TIMEOUT;
- } else if (ret) {
+ } else if (err) {
return FW_UPLOAD_ERR_RW_ERROR;
}
- status = rsu_stat(doorbell);
if (status == RSU_STAT_WEAROUT) {
dev_warn(sec->dev, "Excessive flash update count detected\n");
return FW_UPLOAD_ERR_WEAROUT;
} else if (status == RSU_STAT_ERASE_FAIL) {
- log_error_regs(sec, doorbell);
+ log_error_regs(sec, doorbell_reg);
return FW_UPLOAD_ERR_HW_ERROR;
}
@@ -286,11 +411,12 @@ static enum fw_upload_err rsu_update_init(struct m10bmc_sec *sec)
static enum fw_upload_err rsu_prog_ready(struct m10bmc_sec *sec)
{
+ const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
unsigned long poll_timeout;
u32 doorbell, progress;
int ret;
- ret = m10bmc_sys_read(sec->m10bmc, M10BMC_DOORBELL, &doorbell);
+ ret = m10bmc_sys_read(sec->m10bmc, csr_map->doorbell, &doorbell);
if (ret)
return FW_UPLOAD_ERR_RW_ERROR;
@@ -300,7 +426,7 @@ static enum fw_upload_err rsu_prog_ready(struct m10bmc_sec *sec)
if (time_after(jiffies, poll_timeout))
break;
- ret = m10bmc_sys_read(sec->m10bmc, M10BMC_DOORBELL, &doorbell);
+ ret = m10bmc_sys_read(sec->m10bmc, csr_map->doorbell, &doorbell);
if (ret)
return FW_UPLOAD_ERR_RW_ERROR;
}
@@ -319,91 +445,80 @@ static enum fw_upload_err rsu_prog_ready(struct m10bmc_sec *sec)
static enum fw_upload_err rsu_send_data(struct m10bmc_sec *sec)
{
- u32 doorbell;
+ const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
+ u32 doorbell_reg, status;
int ret;
- ret = regmap_update_bits(sec->m10bmc->regmap,
- M10BMC_SYS_BASE + M10BMC_DOORBELL,
- DRBL_HOST_STATUS,
- FIELD_PREP(DRBL_HOST_STATUS,
- HOST_STATUS_WRITE_DONE));
+ ret = m10bmc_sys_update_bits(sec->m10bmc, csr_map->doorbell,
+ DRBL_HOST_STATUS,
+ FIELD_PREP(DRBL_HOST_STATUS,
+ HOST_STATUS_WRITE_DONE));
if (ret)
return FW_UPLOAD_ERR_RW_ERROR;
ret = regmap_read_poll_timeout(sec->m10bmc->regmap,
- M10BMC_SYS_BASE + M10BMC_DOORBELL,
- doorbell,
- rsu_prog(doorbell) != RSU_PROG_READY,
+ csr_map->base + csr_map->doorbell,
+ doorbell_reg,
+ rsu_prog(doorbell_reg) != RSU_PROG_READY,
NIOS_HANDSHAKE_INTERVAL_US,
NIOS_HANDSHAKE_TIMEOUT_US);
if (ret == -ETIMEDOUT) {
- log_error_regs(sec, doorbell);
+ log_error_regs(sec, doorbell_reg);
return FW_UPLOAD_ERR_TIMEOUT;
} else if (ret) {
return FW_UPLOAD_ERR_RW_ERROR;
}
- switch (rsu_stat(doorbell)) {
- case RSU_STAT_NORMAL:
- case RSU_STAT_NIOS_OK:
- case RSU_STAT_USER_OK:
- case RSU_STAT_FACTORY_OK:
- break;
- default:
- log_error_regs(sec, doorbell);
+ ret = sec->ops->rsu_status(sec);
+ if (ret < 0)
+ return FW_UPLOAD_ERR_HW_ERROR;
+ status = ret;
+
+ if (!rsu_status_ok(status)) {
+ log_error_regs(sec, doorbell_reg);
return FW_UPLOAD_ERR_HW_ERROR;
}
return FW_UPLOAD_ERR_NONE;
}
-static int rsu_check_complete(struct m10bmc_sec *sec, u32 *doorbell)
+static int rsu_check_complete(struct m10bmc_sec *sec, u32 *doorbell_reg)
{
- if (m10bmc_sys_read(sec->m10bmc, M10BMC_DOORBELL, doorbell))
+ u32 progress, status;
+
+ if (m10bmc_sec_progress_status(sec, doorbell_reg, &progress, &status))
return -EIO;
- switch (rsu_stat(*doorbell)) {
- case RSU_STAT_NORMAL:
- case RSU_STAT_NIOS_OK:
- case RSU_STAT_USER_OK:
- case RSU_STAT_FACTORY_OK:
- break;
- default:
+ if (!rsu_status_ok(status))
return -EINVAL;
- }
- switch (rsu_prog(*doorbell)) {
- case RSU_PROG_IDLE:
- case RSU_PROG_RSU_DONE:
+ if (rsu_progress_done(progress))
return 0;
- case RSU_PROG_AUTHENTICATING:
- case RSU_PROG_COPYING:
- case RSU_PROG_UPDATE_CANCEL:
- case RSU_PROG_PROGRAM_KEY_HASH:
+
+ if (rsu_progress_busy(progress))
return -EAGAIN;
- default:
- return -EINVAL;
- }
+
+ return -EINVAL;
}
static enum fw_upload_err rsu_cancel(struct m10bmc_sec *sec)
{
+ const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
u32 doorbell;
int ret;
- ret = m10bmc_sys_read(sec->m10bmc, M10BMC_DOORBELL, &doorbell);
+ ret = m10bmc_sys_read(sec->m10bmc, csr_map->doorbell, &doorbell);
if (ret)
return FW_UPLOAD_ERR_RW_ERROR;
if (rsu_prog(doorbell) != RSU_PROG_READY)
return FW_UPLOAD_ERR_BUSY;
- ret = regmap_update_bits(sec->m10bmc->regmap,
- M10BMC_SYS_BASE + M10BMC_DOORBELL,
- DRBL_HOST_STATUS,
- FIELD_PREP(DRBL_HOST_STATUS,
- HOST_STATUS_ABORT_RSU));
+ ret = m10bmc_sys_update_bits(sec->m10bmc, csr_map->doorbell,
+ DRBL_HOST_STATUS,
+ FIELD_PREP(DRBL_HOST_STATUS,
+ HOST_STATUS_ABORT_RSU));
if (ret)
return FW_UPLOAD_ERR_RW_ERROR;
@@ -414,46 +529,65 @@ static enum fw_upload_err m10bmc_sec_prepare(struct fw_upload *fwl,
const u8 *data, u32 size)
{
struct m10bmc_sec *sec = fwl->dd_handle;
+ const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
u32 ret;
sec->cancel_request = false;
- if (!size || size > M10BMC_STAGING_SIZE)
+ if (!size || size > csr_map->staging_size)
return FW_UPLOAD_ERR_INVALID_SIZE;
+ if (sec->m10bmc->flash_bulk_ops)
+ if (sec->m10bmc->flash_bulk_ops->lock_write(sec->m10bmc))
+ return FW_UPLOAD_ERR_BUSY;
+
ret = rsu_check_idle(sec);
if (ret != FW_UPLOAD_ERR_NONE)
- return ret;
+ goto unlock_flash;
+
+ m10bmc_fw_state_set(sec->m10bmc, M10BMC_FW_STATE_SEC_UPDATE_PREPARE);
ret = rsu_update_init(sec);
if (ret != FW_UPLOAD_ERR_NONE)
- return ret;
+ goto fw_state_exit;
ret = rsu_prog_ready(sec);
if (ret != FW_UPLOAD_ERR_NONE)
- return ret;
+ goto fw_state_exit;
- if (sec->cancel_request)
- return rsu_cancel(sec);
+ if (sec->cancel_request) {
+ ret = rsu_cancel(sec);
+ goto fw_state_exit;
+ }
+
+ m10bmc_fw_state_set(sec->m10bmc, M10BMC_FW_STATE_SEC_UPDATE_WRITE);
return FW_UPLOAD_ERR_NONE;
+
+fw_state_exit:
+ m10bmc_fw_state_set(sec->m10bmc, M10BMC_FW_STATE_NORMAL);
+
+unlock_flash:
+ if (sec->m10bmc->flash_bulk_ops)
+ sec->m10bmc->flash_bulk_ops->unlock_write(sec->m10bmc);
+ return ret;
}
#define WRITE_BLOCK_SIZE 0x4000 /* Default write-block size is 0x4000 bytes */
-static enum fw_upload_err m10bmc_sec_write(struct fw_upload *fwl, const u8 *data,
- u32 offset, u32 size, u32 *written)
+static enum fw_upload_err m10bmc_sec_fw_write(struct fw_upload *fwl, const u8 *data,
+ u32 offset, u32 size, u32 *written)
{
struct m10bmc_sec *sec = fwl->dd_handle;
- u32 blk_size, doorbell, extra_offset;
- unsigned int stride, extra = 0;
+ const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
+ struct intel_m10bmc *m10bmc = sec->m10bmc;
+ u32 blk_size, doorbell;
int ret;
- stride = regmap_get_reg_stride(sec->m10bmc->regmap);
if (sec->cancel_request)
return rsu_cancel(sec);
- ret = m10bmc_sys_read(sec->m10bmc, M10BMC_DOORBELL, &doorbell);
+ ret = m10bmc_sys_read(m10bmc, csr_map->doorbell, &doorbell);
if (ret) {
return FW_UPLOAD_ERR_RW_ERROR;
} else if (rsu_prog(doorbell) != RSU_PROG_READY) {
@@ -461,28 +595,12 @@ static enum fw_upload_err m10bmc_sec_write(struct fw_upload *fwl, const u8 *data
return FW_UPLOAD_ERR_HW_ERROR;
}
- WARN_ON_ONCE(WRITE_BLOCK_SIZE % stride);
+ WARN_ON_ONCE(WRITE_BLOCK_SIZE % regmap_get_reg_stride(m10bmc->regmap));
blk_size = min_t(u32, WRITE_BLOCK_SIZE, size);
- ret = regmap_bulk_write(sec->m10bmc->regmap,
- M10BMC_STAGING_BASE + offset,
- (void *)data + offset,
- blk_size / stride);
+ ret = m10bmc_sec_write(sec, data, offset, blk_size);
if (ret)
return FW_UPLOAD_ERR_RW_ERROR;
- /*
- * If blk_size is not aligned to stride, then handle the extra
- * bytes with regmap_write.
- */
- if (blk_size % stride) {
- extra_offset = offset + ALIGN_DOWN(blk_size, stride);
- memcpy(&extra, (u8 *)(data + extra_offset), blk_size % stride);
- ret = regmap_write(sec->m10bmc->regmap,
- M10BMC_STAGING_BASE + extra_offset, extra);
- if (ret)
- return FW_UPLOAD_ERR_RW_ERROR;
- }
-
*written = blk_size;
return FW_UPLOAD_ERR_NONE;
}
@@ -497,6 +615,8 @@ static enum fw_upload_err m10bmc_sec_poll_complete(struct fw_upload *fwl)
if (sec->cancel_request)
return rsu_cancel(sec);
+ m10bmc_fw_state_set(sec->m10bmc, M10BMC_FW_STATE_SEC_UPDATE_PROGRAM);
+
result = rsu_send_data(sec);
if (result != FW_UPLOAD_ERR_NONE)
return result;
@@ -539,16 +659,29 @@ static void m10bmc_sec_cleanup(struct fw_upload *fwl)
struct m10bmc_sec *sec = fwl->dd_handle;
(void)rsu_cancel(sec);
+
+ m10bmc_fw_state_set(sec->m10bmc, M10BMC_FW_STATE_NORMAL);
+
+ if (sec->m10bmc->flash_bulk_ops)
+ sec->m10bmc->flash_bulk_ops->unlock_write(sec->m10bmc);
}
static const struct fw_upload_ops m10bmc_ops = {
.prepare = m10bmc_sec_prepare,
- .write = m10bmc_sec_write,
+ .write = m10bmc_sec_fw_write,
.poll_complete = m10bmc_sec_poll_complete,
.cancel = m10bmc_sec_cancel,
.cleanup = m10bmc_sec_cleanup,
};
+static const struct m10bmc_sec_ops m10sec_n3000_ops = {
+ .rsu_status = m10bmc_sec_n3000_rsu_status,
+};
+
+static const struct m10bmc_sec_ops m10sec_n6000_ops = {
+ .rsu_status = m10bmc_sec_n6000_rsu_status,
+};
+
#define SEC_UPDATE_LEN_MAX 32
static int m10bmc_sec_probe(struct platform_device *pdev)
{
@@ -564,6 +697,7 @@ static int m10bmc_sec_probe(struct platform_device *pdev)
sec->dev = &pdev->dev;
sec->m10bmc = dev_get_drvdata(pdev->dev.parent);
+ sec->ops = (struct m10bmc_sec_ops *)platform_get_device_id(pdev)->driver_data;
dev_set_drvdata(&pdev->dev, sec);
ret = xa_alloc(&fw_upload_xa, &sec->fw_name_id, sec,
@@ -574,39 +708,50 @@ static int m10bmc_sec_probe(struct platform_device *pdev)
len = scnprintf(buf, SEC_UPDATE_LEN_MAX, "secure-update%d",
sec->fw_name_id);
sec->fw_name = kmemdup_nul(buf, len, GFP_KERNEL);
- if (!sec->fw_name)
- return -ENOMEM;
+ if (!sec->fw_name) {
+ ret = -ENOMEM;
+ goto fw_name_fail;
+ }
fwl = firmware_upload_register(THIS_MODULE, sec->dev, sec->fw_name,
&m10bmc_ops, sec);
if (IS_ERR(fwl)) {
dev_err(sec->dev, "Firmware Upload driver failed to start\n");
- kfree(sec->fw_name);
- xa_erase(&fw_upload_xa, sec->fw_name_id);
- return PTR_ERR(fwl);
+ ret = PTR_ERR(fwl);
+ goto fw_uploader_fail;
}
sec->fwl = fwl;
return 0;
+
+fw_uploader_fail:
+ kfree(sec->fw_name);
+fw_name_fail:
+ xa_erase(&fw_upload_xa, sec->fw_name_id);
+ return ret;
}
-static int m10bmc_sec_remove(struct platform_device *pdev)
+static void m10bmc_sec_remove(struct platform_device *pdev)
{
struct m10bmc_sec *sec = dev_get_drvdata(&pdev->dev);
firmware_upload_unregister(sec->fwl);
kfree(sec->fw_name);
xa_erase(&fw_upload_xa, sec->fw_name_id);
-
- return 0;
}
static const struct platform_device_id intel_m10bmc_sec_ids[] = {
{
.name = "n3000bmc-sec-update",
+ .driver_data = (kernel_ulong_t)&m10sec_n3000_ops,
},
{
.name = "d5005bmc-sec-update",
+ .driver_data = (kernel_ulong_t)&m10sec_n3000_ops,
+ },
+ {
+ .name = "n6000bmc-sec-update",
+ .driver_data = (kernel_ulong_t)&m10sec_n6000_ops,
},
{ }
};
@@ -626,3 +771,4 @@ module_platform_driver(intel_m10bmc_sec_driver);
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel MAX10 BMC Secure Update");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("INTEL_M10_BMC_CORE");
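
The m10bmc_sec_read()/m10bmc_sec_write() helpers introduced in this file split a transfer into a bulk regmap access plus, when the size is not a multiple of the register stride, one extra word for the tail. A worked example with hypothetical numbers:

/* Hypothetical values: stride = 4 bytes, size = 10 bytes
 *   read_count      = size / stride       = 2  -> regmap_bulk_read() of 2 words
 *   leftover_offset = read_count * stride = 8  -> tail starts at byte 8
 *   leftover_size   = size - 8            = 2  -> one extra regmap_read() into a
 *                                                 u32, then memcpy() only the
 *                                                 2 remaining bytes to the buffer
 */
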
diff --git a/drivers/fpga/lattice-sysconfig-spi.c b/drivers/fpga/lattice-sysconfig-spi.c
index 2702b26b7f55..44691cfcf50a 100644
--- a/drivers/fpga/lattice-sysconfig-spi.c
+++ b/drivers/fpga/lattice-sysconfig-spi.c
@@ -3,6 +3,7 @@
* Lattice FPGA programming over slave SPI sysCONFIG interface.
*/
+#include <linux/of.h>
#include <linux/spi/spi.h>
#include "lattice-sysconfig.h"
diff --git a/drivers/fpga/microchip-spi.c b/drivers/fpga/microchip-spi.c
index 7436976ea904..6134cea86ac8 100644
--- a/drivers/fpga/microchip-spi.c
+++ b/drivers/fpga/microchip-spi.c
@@ -3,11 +3,12 @@
* Microchip Polarfire FPGA programming over slave SPI interface.
*/
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/delay.h>
#include <linux/fpga/fpga-mgr.h>
+#include <linux/iopoll.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/spi/spi.h>
#define MPF_SPI_ISC_ENABLE 0x0B
@@ -33,7 +34,7 @@
#define MPF_BITS_PER_COMPONENT_SIZE 22
-#define MPF_STATUS_POLL_RETRIES 10000
+#define MPF_STATUS_POLL_TIMEOUT (2 * USEC_PER_SEC)
#define MPF_STATUS_BUSY BIT(0)
#define MPF_STATUS_READY BIT(1)
#define MPF_STATUS_SPI_VIOLATION BIT(2)
@@ -42,46 +43,55 @@
struct mpf_priv {
struct spi_device *spi;
bool program_mode;
+ u8 tx __aligned(ARCH_KMALLOC_MINALIGN);
+ u8 rx;
};
-static int mpf_read_status(struct spi_device *spi)
+static int mpf_read_status(struct mpf_priv *priv)
{
- u8 status = 0, status_command = MPF_SPI_READ_STATUS;
- struct spi_transfer xfers[2] = { 0 };
- int ret;
-
/*
* HW status is returned on MISO in the first byte after CS went
* active. However, first reading can be inadequate, so we submit
* two identical SPI transfers and use result of the later one.
*/
- xfers[0].tx_buf = &status_command;
- xfers[1].tx_buf = &status_command;
- xfers[0].rx_buf = &status;
- xfers[1].rx_buf = &status;
- xfers[0].len = 1;
- xfers[1].len = 1;
- xfers[0].cs_change = 1;
+ struct spi_transfer xfers[2] = {
+ {
+ .tx_buf = &priv->tx,
+ .rx_buf = &priv->rx,
+ .len = 1,
+ .cs_change = 1,
+ }, {
+ .tx_buf = &priv->tx,
+ .rx_buf = &priv->rx,
+ .len = 1,
+ },
+ };
+ u8 status;
+ int ret;
- ret = spi_sync_transfer(spi, xfers, 2);
+ priv->tx = MPF_SPI_READ_STATUS;
+
+ ret = spi_sync_transfer(priv->spi, xfers, 2);
+ if (ret)
+ return ret;
+
+ status = priv->rx;
if ((status & MPF_STATUS_SPI_VIOLATION) ||
(status & MPF_STATUS_SPI_ERROR))
- ret = -EIO;
+ return -EIO;
- return ret ? : status;
+ return status;
}
static enum fpga_mgr_states mpf_ops_state(struct fpga_manager *mgr)
{
struct mpf_priv *priv = mgr->priv;
- struct spi_device *spi;
bool program_mode;
int status;
- spi = priv->spi;
program_mode = priv->program_mode;
- status = mpf_read_status(spi);
+ status = mpf_read_status(priv);
if (!program_mode && !status)
return FPGA_MGR_STATE_OPERATING;
@@ -185,52 +195,53 @@ static int mpf_ops_parse_header(struct fpga_manager *mgr,
return 0;
}
-/* Poll HW status until busy bit is cleared and mask bits are set. */
-static int mpf_poll_status(struct spi_device *spi, u8 mask)
+static int mpf_poll_status(struct mpf_priv *priv, u8 mask)
{
- int status, retries = MPF_STATUS_POLL_RETRIES;
-
- while (retries--) {
- status = mpf_read_status(spi);
- if (status < 0)
- return status;
+ int ret, status;
- if (status & MPF_STATUS_BUSY)
- continue;
-
- if (!mask || (status & mask))
- return status;
- }
+ /*
+ * Busy poll HW status. Polling stops if any of the following
+ * conditions are met:
+ * - timeout is reached
+ * - mpf_read_status() returns an error
+ * - busy bit is cleared AND mask bits are set
+ */
+ ret = read_poll_timeout(mpf_read_status, status,
+ (status < 0) ||
+ ((status & (MPF_STATUS_BUSY | mask)) == mask),
+ 0, MPF_STATUS_POLL_TIMEOUT, false, priv);
+ if (ret < 0)
+ return ret;
- return -EBUSY;
+ return status;
}
-static int mpf_spi_write(struct spi_device *spi, const void *buf, size_t buf_size)
+static int mpf_spi_write(struct mpf_priv *priv, const void *buf, size_t buf_size)
{
- int status = mpf_poll_status(spi, 0);
+ int status = mpf_poll_status(priv, 0);
if (status < 0)
return status;
- return spi_write(spi, buf, buf_size);
+ return spi_write_then_read(priv->spi, buf, buf_size, NULL, 0);
}
-static int mpf_spi_write_then_read(struct spi_device *spi,
+static int mpf_spi_write_then_read(struct mpf_priv *priv,
const void *txbuf, size_t txbuf_size,
void *rxbuf, size_t rxbuf_size)
{
const u8 read_command[] = { MPF_SPI_READ_DATA };
int ret;
- ret = mpf_spi_write(spi, txbuf, txbuf_size);
+ ret = mpf_spi_write(priv, txbuf, txbuf_size);
if (ret)
return ret;
- ret = mpf_poll_status(spi, MPF_STATUS_READY);
+ ret = mpf_poll_status(priv, MPF_STATUS_READY);
if (ret < 0)
return ret;
- return spi_write_then_read(spi, read_command, sizeof(read_command),
+ return spi_write_then_read(priv->spi, read_command, sizeof(read_command),
rxbuf, rxbuf_size);
}
@@ -242,7 +253,6 @@ static int mpf_ops_write_init(struct fpga_manager *mgr,
const u8 isc_en_command[] = { MPF_SPI_ISC_ENABLE };
struct mpf_priv *priv = mgr->priv;
struct device *dev = &mgr->dev;
- struct spi_device *spi;
u32 isc_ret = 0;
int ret;
@@ -251,9 +261,7 @@ static int mpf_ops_write_init(struct fpga_manager *mgr,
return -EOPNOTSUPP;
}
- spi = priv->spi;
-
- ret = mpf_spi_write_then_read(spi, isc_en_command, sizeof(isc_en_command),
+ ret = mpf_spi_write_then_read(priv, isc_en_command, sizeof(isc_en_command),
&isc_ret, sizeof(isc_ret));
if (ret || isc_ret) {
dev_err(dev, "Failed to enable ISC: spi_ret %d, isc_ret %u\n",
@@ -261,7 +269,7 @@ static int mpf_ops_write_init(struct fpga_manager *mgr,
return -EFAULT;
}
- ret = mpf_spi_write(spi, program_mode, sizeof(program_mode));
+ ret = mpf_spi_write(priv, program_mode, sizeof(program_mode));
if (ret) {
dev_err(dev, "Failed to enter program mode: %d\n", ret);
return ret;
@@ -272,13 +280,32 @@ static int mpf_ops_write_init(struct fpga_manager *mgr,
return 0;
}
+static int mpf_spi_frame_write(struct mpf_priv *priv, const char *buf)
+{
+ struct spi_transfer xfers[2] = {
+ {
+ .tx_buf = &priv->tx,
+ .len = 1,
+ }, {
+ .tx_buf = buf,
+ .len = MPF_SPI_FRAME_SIZE,
+ },
+ };
+ int ret;
+
+ ret = mpf_poll_status(priv, 0);
+ if (ret < 0)
+ return ret;
+
+ priv->tx = MPF_SPI_FRAME;
+
+ return spi_sync_transfer(priv->spi, xfers, ARRAY_SIZE(xfers));
+}
+
static int mpf_ops_write(struct fpga_manager *mgr, const char *buf, size_t count)
{
- u8 spi_frame_command[] = { MPF_SPI_FRAME };
- struct spi_transfer xfers[2] = { 0 };
struct mpf_priv *priv = mgr->priv;
struct device *dev = &mgr->dev;
- struct spi_device *spi;
int ret, i;
if (count % MPF_SPI_FRAME_SIZE) {
@@ -287,19 +314,8 @@ static int mpf_ops_write(struct fpga_manager *mgr, const char *buf, size_t count
return -EINVAL;
}
- spi = priv->spi;
-
- xfers[0].tx_buf = spi_frame_command;
- xfers[0].len = sizeof(spi_frame_command);
-
for (i = 0; i < count / MPF_SPI_FRAME_SIZE; i++) {
- xfers[1].tx_buf = buf + i * MPF_SPI_FRAME_SIZE;
- xfers[1].len = MPF_SPI_FRAME_SIZE;
-
- ret = mpf_poll_status(spi, 0);
- if (ret >= 0)
- ret = spi_sync_transfer(spi, xfers, ARRAY_SIZE(xfers));
-
+ ret = mpf_spi_frame_write(priv, buf + i * MPF_SPI_FRAME_SIZE);
if (ret) {
dev_err(dev, "Failed to write bitstream frame %d/%zu\n",
i, count / MPF_SPI_FRAME_SIZE);
@@ -317,12 +333,9 @@ static int mpf_ops_write_complete(struct fpga_manager *mgr,
const u8 release_command[] = { MPF_SPI_RELEASE };
struct mpf_priv *priv = mgr->priv;
struct device *dev = &mgr->dev;
- struct spi_device *spi;
int ret;
- spi = priv->spi;
-
- ret = mpf_spi_write(spi, isc_dis_command, sizeof(isc_dis_command));
+ ret = mpf_spi_write(priv, isc_dis_command, sizeof(isc_dis_command));
if (ret) {
dev_err(dev, "Failed to disable ISC: %d\n", ret);
return ret;
@@ -330,7 +343,7 @@ static int mpf_ops_write_complete(struct fpga_manager *mgr,
usleep_range(1000, 2000);
- ret = mpf_spi_write(spi, release_command, sizeof(release_command));
+ ret = mpf_spi_write(priv, release_command, sizeof(release_command));
if (ret) {
dev_err(dev, "Failed to exit program mode: %d\n", ret);
return ret;
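
Both this file and intel-m10-bmc-sec-update.c above replace open-coded polling loops with read_poll_timeout() from linux/iopoll.h. A minimal sketch of that pattern under assumed names (my_device, my_read_status() and MY_READY are made up for illustration):

#include <linux/bits.h>
#include <linux/iopoll.h>
#include <linux/time64.h>

struct my_device;					/* hypothetical device type */
int my_read_status(struct my_device *dev);		/* hypothetical status read */
#define MY_READY	BIT(1)				/* hypothetical status bit */

static int wait_until_ready(struct my_device *dev)
{
	int status, err;

	/*
	 * Repeatedly evaluates status = my_read_status(dev) until the
	 * condition is true or the timeout (in microseconds) expires.
	 * sleep_us == 0 means busy-poll between reads; the trailing
	 * "false" skips the optional sleep before the first read.
	 */
	err = read_poll_timeout(my_read_status, status,
				status < 0 || (status & MY_READY),
				0, 2 * USEC_PER_SEC, false, dev);
	if (err)
		return err;	/* -ETIMEDOUT if the condition never held */

	return status < 0 ? status : 0;
}
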
diff --git a/drivers/fpga/of-fpga-region.c b/drivers/fpga/of-fpga-region.c
index ae82532fc127..43db4bb77138 100644
--- a/drivers/fpga/of-fpga-region.c
+++ b/drivers/fpga/of-fpga-region.c
@@ -12,7 +12,9 @@
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
@@ -423,15 +425,13 @@ eprobe_mgr_put:
return ret;
}
-static int of_fpga_region_remove(struct platform_device *pdev)
+static void of_fpga_region_remove(struct platform_device *pdev)
{
struct fpga_region *region = platform_get_drvdata(pdev);
struct fpga_manager *mgr = region->mgr;
fpga_region_unregister(region);
fpga_mgr_put(mgr);
-
- return 0;
}
static struct platform_driver of_fpga_region_driver = {
diff --git a/drivers/fpga/socfpga-a10.c b/drivers/fpga/socfpga-a10.c
index ac8e89b8a5cc..0165a3c86932 100644
--- a/drivers/fpga/socfpga-a10.c
+++ b/drivers/fpga/socfpga-a10.c
@@ -471,7 +471,6 @@ static int socfpga_a10_fpga_probe(struct platform_device *pdev)
struct a10_fpga_priv *priv;
void __iomem *reg_base;
struct fpga_manager *mgr;
- struct resource *res;
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -479,14 +478,12 @@ static int socfpga_a10_fpga_probe(struct platform_device *pdev)
return -ENOMEM;
/* First mmio base is for register access */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- reg_base = devm_ioremap_resource(dev, res);
+ reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(reg_base))
return PTR_ERR(reg_base);
/* Second mmio base is for writing FPGA image data */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- priv->fpga_data_addr = devm_ioremap_resource(dev, res);
+ priv->fpga_data_addr = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(priv->fpga_data_addr))
return PTR_ERR(priv->fpga_data_addr);
@@ -520,15 +517,13 @@ static int socfpga_a10_fpga_probe(struct platform_device *pdev)
return 0;
}
-static int socfpga_a10_fpga_remove(struct platform_device *pdev)
+static void socfpga_a10_fpga_remove(struct platform_device *pdev)
{
struct fpga_manager *mgr = platform_get_drvdata(pdev);
struct a10_fpga_priv *priv = mgr->priv;
fpga_mgr_unregister(mgr);
clk_disable_unprepare(priv->clk);
-
- return 0;
}
static const struct of_device_id socfpga_a10_fpga_of_match[] = {
diff --git a/drivers/fpga/socfpga.c b/drivers/fpga/socfpga.c
index 7e0741f99696..b08b4bb8f650 100644
--- a/drivers/fpga/socfpga.c
+++ b/drivers/fpga/socfpga.c
@@ -301,16 +301,17 @@ static irqreturn_t socfpga_fpga_isr(int irq, void *dev_id)
static int socfpga_fpga_wait_for_config_done(struct socfpga_fpga_priv *priv)
{
- int timeout, ret = 0;
+ int ret = 0;
+ long time_left;
socfpga_fpga_disable_irqs(priv);
init_completion(&priv->status_complete);
socfpga_fpga_enable_irqs(priv, SOCFPGA_FPGMGR_MON_CONF_DONE);
- timeout = wait_for_completion_interruptible_timeout(
+ time_left = wait_for_completion_interruptible_timeout(
&priv->status_complete,
msecs_to_jiffies(10));
- if (timeout == 0)
+ if (time_left == 0)
ret = -ETIMEDOUT;
socfpga_fpga_disable_irqs(priv);
@@ -545,20 +546,17 @@ static int socfpga_fpga_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct socfpga_fpga_priv *priv;
struct fpga_manager *mgr;
- struct resource *res;
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->fpga_base_addr = devm_ioremap_resource(dev, res);
+ priv->fpga_base_addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->fpga_base_addr))
return PTR_ERR(priv->fpga_base_addr);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- priv->fpga_data_addr = devm_ioremap_resource(dev, res);
+ priv->fpga_data_addr = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(priv->fpga_data_addr))
return PTR_ERR(priv->fpga_data_addr);
diff --git a/drivers/fpga/stratix10-soc.c b/drivers/fpga/stratix10-soc.c
index 357cea58ec98..0a295ccf1644 100644
--- a/drivers/fpga/stratix10-soc.c
+++ b/drivers/fpga/stratix10-soc.c
@@ -10,6 +10,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
+#include <linux/platform_device.h>
/*
* FPGA programming requires a higher level of privilege (EL3), per the SoC
@@ -213,9 +214,9 @@ static int s10_ops_write_init(struct fpga_manager *mgr,
/* Allocate buffers from the service layer's pool. */
for (i = 0; i < NUM_SVC_BUFS; i++) {
kbuf = stratix10_svc_allocate_memory(priv->chan, SVC_BUF_SIZE);
- if (!kbuf) {
+ if (IS_ERR(kbuf)) {
s10_free_buffers(mgr);
- ret = -ENOMEM;
+ ret = PTR_ERR(kbuf);
goto init_done;
}
@@ -435,15 +436,13 @@ probe_err:
return ret;
}
-static int s10_remove(struct platform_device *pdev)
+static void s10_remove(struct platform_device *pdev)
{
struct fpga_manager *mgr = platform_get_drvdata(pdev);
struct s10_priv *priv = mgr->priv;
fpga_mgr_unregister(mgr);
stratix10_svc_free_channel(priv->chan);
-
- return 0;
}
static const struct of_device_id s10_of_match[] = {
diff --git a/drivers/fpga/tests/.kunitconfig b/drivers/fpga/tests/.kunitconfig
new file mode 100644
index 000000000000..a1c2a2974c39
--- /dev/null
+++ b/drivers/fpga/tests/.kunitconfig
@@ -0,0 +1,5 @@
+CONFIG_KUNIT=y
+CONFIG_FPGA=y
+CONFIG_FPGA_REGION=y
+CONFIG_FPGA_BRIDGE=y
+CONFIG_FPGA_KUNIT_TESTS=y
diff --git a/drivers/fpga/tests/Kconfig b/drivers/fpga/tests/Kconfig
new file mode 100644
index 000000000000..e4a64815f16d
--- /dev/null
+++ b/drivers/fpga/tests/Kconfig
@@ -0,0 +1,11 @@
+config FPGA_KUNIT_TESTS
+ tristate "KUnit test for the FPGA subsystem" if !KUNIT_ALL_TESTS
+ depends on FPGA && FPGA_REGION && FPGA_BRIDGE && KUNIT=y
+ default KUNIT_ALL_TESTS
+ help
+	  This builds unit tests for the FPGA subsystem.
+
+ For more information on KUnit and unit tests in general,
+ please refer to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+ If unsure, say N.
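
The new tests/ directory also carries a .kunitconfig (added above), so the whole FPGA suite can typically be run through the KUnit wrapper, for example ./tools/testing/kunit/kunit.py run --kunitconfig=drivers/fpga/tests from the top of an in-tree kernel checkout; the exact invocation depends on the local KUnit setup.
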
diff --git a/drivers/fpga/tests/Makefile b/drivers/fpga/tests/Makefile
new file mode 100644
index 000000000000..bb78215c645c
--- /dev/null
+++ b/drivers/fpga/tests/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for KUnit test suites for the FPGA subsystem
+#
+
+obj-$(CONFIG_FPGA_KUNIT_TESTS) += fpga-mgr-test.o fpga-bridge-test.o fpga-region-test.o
diff --git a/drivers/fpga/tests/fpga-bridge-test.c b/drivers/fpga/tests/fpga-bridge-test.c
new file mode 100644
index 000000000000..124ba40e32b1
--- /dev/null
+++ b/drivers/fpga/tests/fpga-bridge-test.c
@@ -0,0 +1,174 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit test for the FPGA Bridge
+ *
+ * Copyright (C) 2023 Red Hat, Inc.
+ *
+ * Author: Marco Pagani <marpagan@redhat.com>
+ */
+
+#include <kunit/device.h>
+#include <kunit/test.h>
+#include <linux/fpga/fpga-bridge.h>
+#include <linux/module.h>
+#include <linux/types.h>
+
+struct bridge_stats {
+ bool enable;
+};
+
+struct bridge_ctx {
+ struct fpga_bridge *bridge;
+ struct device *dev;
+ struct bridge_stats stats;
+};
+
+/*
+ * Wrapper to avoid a cast warning when passing the action function directly
+ * to kunit_add_action().
+ */
+KUNIT_DEFINE_ACTION_WRAPPER(fpga_bridge_unregister_wrapper, fpga_bridge_unregister,
+ struct fpga_bridge *);
+
+static int op_enable_set(struct fpga_bridge *bridge, bool enable)
+{
+ struct bridge_stats *stats = bridge->priv;
+
+ stats->enable = enable;
+
+ return 0;
+}
+
+/*
+ * Fake FPGA bridge that implements only the enable_set op to track
+ * the state.
+ */
+static const struct fpga_bridge_ops fake_bridge_ops = {
+ .enable_set = op_enable_set,
+};
+
+/**
+ * register_test_bridge() - Register a fake FPGA bridge for testing.
+ * @test: KUnit test context object.
+ * @dev_name: name of the kunit device to be registered
+ *
+ * Return: Context of the newly registered FPGA bridge.
+ */
+static struct bridge_ctx *register_test_bridge(struct kunit *test, const char *dev_name)
+{
+ struct bridge_ctx *ctx;
+ int ret;
+
+ ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+
+ ctx->dev = kunit_device_register(test, dev_name);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx->dev);
+
+ ctx->bridge = fpga_bridge_register(ctx->dev, "Fake FPGA bridge", &fake_bridge_ops,
+ &ctx->stats);
+ KUNIT_ASSERT_FALSE(test, IS_ERR_OR_NULL(ctx->bridge));
+
+ ret = kunit_add_action_or_reset(test, fpga_bridge_unregister_wrapper, ctx->bridge);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ return ctx;
+}
+
+static void fpga_bridge_test_get(struct kunit *test)
+{
+ struct bridge_ctx *ctx = test->priv;
+ struct fpga_bridge *bridge;
+
+ bridge = fpga_bridge_get(ctx->dev, NULL);
+ KUNIT_EXPECT_PTR_EQ(test, bridge, ctx->bridge);
+
+ bridge = fpga_bridge_get(ctx->dev, NULL);
+ KUNIT_EXPECT_EQ(test, PTR_ERR(bridge), -EBUSY);
+
+ fpga_bridge_put(ctx->bridge);
+}
+
+static void fpga_bridge_test_toggle(struct kunit *test)
+{
+ struct bridge_ctx *ctx = test->priv;
+ int ret;
+
+ ret = fpga_bridge_disable(ctx->bridge);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ KUNIT_EXPECT_FALSE(test, ctx->stats.enable);
+
+ ret = fpga_bridge_enable(ctx->bridge);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ KUNIT_EXPECT_TRUE(test, ctx->stats.enable);
+}
+
+/* Test the functions for getting and controlling a list of bridges */
+static void fpga_bridge_test_get_put_list(struct kunit *test)
+{
+ struct list_head bridge_list;
+ struct bridge_ctx *ctx_0, *ctx_1;
+ int ret;
+
+ ctx_0 = test->priv;
+ ctx_1 = register_test_bridge(test, "fpga-bridge-test-dev-1");
+
+ INIT_LIST_HEAD(&bridge_list);
+
+ /* Get bridge 0 and add it to the list */
+ ret = fpga_bridge_get_to_list(ctx_0->dev, NULL, &bridge_list);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ KUNIT_EXPECT_PTR_EQ(test, ctx_0->bridge,
+ list_first_entry_or_null(&bridge_list, struct fpga_bridge, node));
+
+ /* Get bridge 1 and add it to the list */
+ ret = fpga_bridge_get_to_list(ctx_1->dev, NULL, &bridge_list);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ KUNIT_EXPECT_PTR_EQ(test, ctx_1->bridge,
+ list_first_entry_or_null(&bridge_list, struct fpga_bridge, node));
+
+	/* Disable and then enable both bridges from the list */
+ ret = fpga_bridges_disable(&bridge_list);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ KUNIT_EXPECT_FALSE(test, ctx_0->stats.enable);
+ KUNIT_EXPECT_FALSE(test, ctx_1->stats.enable);
+
+ ret = fpga_bridges_enable(&bridge_list);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ KUNIT_EXPECT_TRUE(test, ctx_0->stats.enable);
+ KUNIT_EXPECT_TRUE(test, ctx_1->stats.enable);
+
+ /* Put and remove both bridges from the list */
+ fpga_bridges_put(&bridge_list);
+
+ KUNIT_EXPECT_TRUE(test, list_empty(&bridge_list));
+}
+
+static int fpga_bridge_test_init(struct kunit *test)
+{
+ test->priv = register_test_bridge(test, "fpga-bridge-test-dev-0");
+
+ return 0;
+}
+
+static struct kunit_case fpga_bridge_test_cases[] = {
+ KUNIT_CASE(fpga_bridge_test_get),
+ KUNIT_CASE(fpga_bridge_test_toggle),
+ KUNIT_CASE(fpga_bridge_test_get_put_list),
+ {}
+};
+
+static struct kunit_suite fpga_bridge_suite = {
+ .name = "fpga_bridge",
+ .init = fpga_bridge_test_init,
+ .test_cases = fpga_bridge_test_cases,
+};
+
+kunit_test_suite(fpga_bridge_suite);
+
+MODULE_DESCRIPTION("KUnit test for the FPGA Bridge");
+MODULE_LICENSE("GPL");
diff --git a/drivers/fpga/tests/fpga-mgr-test.c b/drivers/fpga/tests/fpga-mgr-test.c
new file mode 100644
index 000000000000..62975a39ee14
--- /dev/null
+++ b/drivers/fpga/tests/fpga-mgr-test.c
@@ -0,0 +1,335 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit test for the FPGA Manager
+ *
+ * Copyright (C) 2023 Red Hat, Inc.
+ *
+ * Author: Marco Pagani <marpagan@redhat.com>
+ */
+
+#include <kunit/device.h>
+#include <kunit/test.h>
+#include <linux/fpga/fpga-mgr.h>
+#include <linux/module.h>
+#include <linux/scatterlist.h>
+#include <linux/types.h>
+
+#define HEADER_FILL 'H'
+#define IMAGE_FILL 'P'
+#define IMAGE_BLOCK 1024
+
+#define HEADER_SIZE IMAGE_BLOCK
+#define IMAGE_SIZE (IMAGE_BLOCK * 4)
+
+struct mgr_stats {
+ bool header_match;
+ bool image_match;
+ u32 seq_num;
+ u32 op_parse_header_seq;
+ u32 op_write_init_seq;
+ u32 op_write_seq;
+ u32 op_write_sg_seq;
+ u32 op_write_complete_seq;
+ enum fpga_mgr_states op_parse_header_state;
+ enum fpga_mgr_states op_write_init_state;
+ enum fpga_mgr_states op_write_state;
+ enum fpga_mgr_states op_write_sg_state;
+ enum fpga_mgr_states op_write_complete_state;
+};
+
+struct mgr_ctx {
+ struct fpga_image_info *img_info;
+ struct fpga_manager *mgr;
+ struct device *dev;
+ struct mgr_stats stats;
+};
+
+/*
+ * Wrappers to avoid cast warnings when passing action functions directly
+ * to kunit_add_action().
+ */
+KUNIT_DEFINE_ACTION_WRAPPER(sg_free_table_wrapper, sg_free_table,
+ struct sg_table *);
+
+KUNIT_DEFINE_ACTION_WRAPPER(fpga_image_info_free_wrapper, fpga_image_info_free,
+ struct fpga_image_info *);
+
+/**
+ * init_test_buffer() - Allocate and initialize a test image in a buffer.
+ * @test: KUnit test context object.
+ * @count: image size in bytes.
+ *
+ * Return: pointer to the newly allocated image.
+ */
+static char *init_test_buffer(struct kunit *test, size_t count)
+{
+ char *buf;
+
+ KUNIT_ASSERT_GE(test, count, HEADER_SIZE);
+
+ buf = kunit_kzalloc(test, count, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
+
+ memset(buf, HEADER_FILL, HEADER_SIZE);
+ memset(buf + HEADER_SIZE, IMAGE_FILL, count - HEADER_SIZE);
+
+ return buf;
+}
+
+/*
+ * Check the image header. Do not return an error code if the image check fails
+ * since, in this case, it is a failure of the FPGA manager itself, not this
+ * op that tests it.
+ */
+static int op_parse_header(struct fpga_manager *mgr, struct fpga_image_info *info,
+ const char *buf, size_t count)
+{
+ struct mgr_stats *stats = mgr->priv;
+ size_t i;
+
+ stats->op_parse_header_state = mgr->state;
+ stats->op_parse_header_seq = stats->seq_num++;
+
+ /* Set header_size and data_size for later */
+ info->header_size = HEADER_SIZE;
+ info->data_size = info->count - HEADER_SIZE;
+
+ stats->header_match = true;
+ for (i = 0; i < info->header_size; i++) {
+ if (buf[i] != HEADER_FILL) {
+ stats->header_match = false;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int op_write_init(struct fpga_manager *mgr, struct fpga_image_info *info,
+ const char *buf, size_t count)
+{
+ struct mgr_stats *stats = mgr->priv;
+
+ stats->op_write_init_state = mgr->state;
+ stats->op_write_init_seq = stats->seq_num++;
+
+ return 0;
+}
+
+/*
+ * Check the image data. As with op_parse_header, do not return an error code
+ * if the image check fails.
+ */
+static int op_write(struct fpga_manager *mgr, const char *buf, size_t count)
+{
+ struct mgr_stats *stats = mgr->priv;
+ size_t i;
+
+ stats->op_write_state = mgr->state;
+ stats->op_write_seq = stats->seq_num++;
+
+ stats->image_match = true;
+ for (i = 0; i < count; i++) {
+ if (buf[i] != IMAGE_FILL) {
+ stats->image_match = false;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Check the image data, but first skip the header since write_sg will get
+ * the whole image in sg_table. As with op_parse_header, do not return an
+ * error code if the image check fails.
+ */
+static int op_write_sg(struct fpga_manager *mgr, struct sg_table *sgt)
+{
+ struct mgr_stats *stats = mgr->priv;
+ struct sg_mapping_iter miter;
+ char *img;
+ size_t i;
+
+ stats->op_write_sg_state = mgr->state;
+ stats->op_write_sg_seq = stats->seq_num++;
+
+ stats->image_match = true;
+ sg_miter_start(&miter, sgt->sgl, sgt->nents, SG_MITER_FROM_SG);
+
+ if (!sg_miter_skip(&miter, HEADER_SIZE)) {
+ stats->image_match = false;
+ goto out;
+ }
+
+ while (sg_miter_next(&miter)) {
+ img = miter.addr;
+ for (i = 0; i < miter.length; i++) {
+ if (img[i] != IMAGE_FILL) {
+ stats->image_match = false;
+ goto out;
+ }
+ }
+ }
+out:
+ sg_miter_stop(&miter);
+ return 0;
+}
+
+static int op_write_complete(struct fpga_manager *mgr, struct fpga_image_info *info)
+{
+ struct mgr_stats *stats = mgr->priv;
+
+ stats->op_write_complete_state = mgr->state;
+ stats->op_write_complete_seq = stats->seq_num++;
+
+ return 0;
+}
+
+/*
+ * Fake FPGA manager that implements all ops required to check the programming
+ * sequence using a single contiguous buffer and a scatter gather table.
+ */
+static const struct fpga_manager_ops fake_mgr_ops = {
+ .skip_header = true,
+ .parse_header = op_parse_header,
+ .write_init = op_write_init,
+ .write = op_write,
+ .write_sg = op_write_sg,
+ .write_complete = op_write_complete,
+};
+
+static void fpga_mgr_test_get(struct kunit *test)
+{
+ struct mgr_ctx *ctx = test->priv;
+ struct fpga_manager *mgr;
+
+ mgr = fpga_mgr_get(ctx->dev);
+ KUNIT_EXPECT_PTR_EQ(test, mgr, ctx->mgr);
+
+ fpga_mgr_put(ctx->mgr);
+}
+
+static void fpga_mgr_test_lock(struct kunit *test)
+{
+ struct mgr_ctx *ctx = test->priv;
+ int ret;
+
+ ret = fpga_mgr_lock(ctx->mgr);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ ret = fpga_mgr_lock(ctx->mgr);
+ KUNIT_EXPECT_EQ(test, ret, -EBUSY);
+
+ fpga_mgr_unlock(ctx->mgr);
+}
+
+/* Check the programming sequence using an image in a buffer */
+static void fpga_mgr_test_img_load_buf(struct kunit *test)
+{
+ struct mgr_ctx *ctx = test->priv;
+ char *img_buf;
+ int ret;
+
+ img_buf = init_test_buffer(test, IMAGE_SIZE);
+
+ ctx->img_info->count = IMAGE_SIZE;
+ ctx->img_info->buf = img_buf;
+
+ ret = fpga_mgr_load(ctx->mgr, ctx->img_info);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ KUNIT_EXPECT_TRUE(test, ctx->stats.header_match);
+ KUNIT_EXPECT_TRUE(test, ctx->stats.image_match);
+
+ KUNIT_EXPECT_EQ(test, ctx->stats.op_parse_header_state, FPGA_MGR_STATE_PARSE_HEADER);
+ KUNIT_EXPECT_EQ(test, ctx->stats.op_write_init_state, FPGA_MGR_STATE_WRITE_INIT);
+ KUNIT_EXPECT_EQ(test, ctx->stats.op_write_state, FPGA_MGR_STATE_WRITE);
+ KUNIT_EXPECT_EQ(test, ctx->stats.op_write_complete_state, FPGA_MGR_STATE_WRITE_COMPLETE);
+
+ KUNIT_EXPECT_EQ(test, ctx->stats.op_write_init_seq, ctx->stats.op_parse_header_seq + 1);
+ KUNIT_EXPECT_EQ(test, ctx->stats.op_write_seq, ctx->stats.op_parse_header_seq + 2);
+ KUNIT_EXPECT_EQ(test, ctx->stats.op_write_complete_seq, ctx->stats.op_parse_header_seq + 3);
+}
+
+/* Check the programming sequence using an image in a scatter gather table */
+static void fpga_mgr_test_img_load_sgt(struct kunit *test)
+{
+ struct mgr_ctx *ctx = test->priv;
+ struct sg_table *sgt;
+ char *img_buf;
+ int ret;
+
+ img_buf = init_test_buffer(test, IMAGE_SIZE);
+
+ sgt = kunit_kzalloc(test, sizeof(*sgt), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sgt);
+ ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+ sg_init_one(sgt->sgl, img_buf, IMAGE_SIZE);
+
+ ret = kunit_add_action_or_reset(test, sg_free_table_wrapper, sgt);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ ctx->img_info->sgt = sgt;
+
+ ret = fpga_mgr_load(ctx->mgr, ctx->img_info);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ KUNIT_EXPECT_TRUE(test, ctx->stats.header_match);
+ KUNIT_EXPECT_TRUE(test, ctx->stats.image_match);
+
+ KUNIT_EXPECT_EQ(test, ctx->stats.op_parse_header_state, FPGA_MGR_STATE_PARSE_HEADER);
+ KUNIT_EXPECT_EQ(test, ctx->stats.op_write_init_state, FPGA_MGR_STATE_WRITE_INIT);
+ KUNIT_EXPECT_EQ(test, ctx->stats.op_write_sg_state, FPGA_MGR_STATE_WRITE);
+ KUNIT_EXPECT_EQ(test, ctx->stats.op_write_complete_state, FPGA_MGR_STATE_WRITE_COMPLETE);
+
+ KUNIT_EXPECT_EQ(test, ctx->stats.op_write_init_seq, ctx->stats.op_parse_header_seq + 1);
+ KUNIT_EXPECT_EQ(test, ctx->stats.op_write_sg_seq, ctx->stats.op_parse_header_seq + 2);
+ KUNIT_EXPECT_EQ(test, ctx->stats.op_write_complete_seq, ctx->stats.op_parse_header_seq + 3);
+}
+
+static int fpga_mgr_test_init(struct kunit *test)
+{
+ struct mgr_ctx *ctx;
+ int ret;
+
+ ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+
+ ctx->dev = kunit_device_register(test, "fpga-manager-test-dev");
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx->dev);
+
+ ctx->mgr = devm_fpga_mgr_register(ctx->dev, "Fake FPGA Manager", &fake_mgr_ops,
+ &ctx->stats);
+ KUNIT_ASSERT_FALSE(test, IS_ERR_OR_NULL(ctx->mgr));
+
+ ctx->img_info = fpga_image_info_alloc(ctx->dev);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx->img_info);
+
+ ret = kunit_add_action_or_reset(test, fpga_image_info_free_wrapper, ctx->img_info);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ test->priv = ctx;
+
+ return 0;
+}
+
+static struct kunit_case fpga_mgr_test_cases[] = {
+ KUNIT_CASE(fpga_mgr_test_get),
+ KUNIT_CASE(fpga_mgr_test_lock),
+ KUNIT_CASE(fpga_mgr_test_img_load_buf),
+ KUNIT_CASE(fpga_mgr_test_img_load_sgt),
+ {}
+};
+
+static struct kunit_suite fpga_mgr_suite = {
+ .name = "fpga_mgr",
+ .init = fpga_mgr_test_init,
+ .test_cases = fpga_mgr_test_cases,
+};
+
+kunit_test_suite(fpga_mgr_suite);
+
+MODULE_DESCRIPTION("KUnit test for the FPGA Manager");
+MODULE_LICENSE("GPL");
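For context on the manager API the two load tests above exercise: the buffer and scatter-gather cases correspond to the two ways a caller can hand an image to fpga_mgr_load(). Below is a minimal sketch of the buffer-backed path from a hypothetical consumer; the helper name and the lock/unlock framing are illustrative and not part of this patch.

#include <linux/fpga/fpga-mgr.h>

/* Illustrative sketch only (not part of the patch): load a bitstream that
 * is already held in a contiguous kernel buffer.
 */
static int example_load_from_buf(struct fpga_manager *mgr,
				 const char *bitstream, size_t len)
{
	struct fpga_image_info *info;
	int ret;

	info = fpga_image_info_alloc(mgr->dev.parent);
	if (!info)
		return -ENOMEM;

	info->buf = bitstream;
	info->count = len;

	ret = fpga_mgr_lock(mgr);		/* claim the manager */
	if (!ret) {
		/* runs parse_header/write_init/write/write_complete */
		ret = fpga_mgr_load(mgr, info);
		fpga_mgr_unlock(mgr);
	}

	fpga_image_info_free(info);
	return ret;
}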
diff --git a/drivers/fpga/tests/fpga-region-test.c b/drivers/fpga/tests/fpga-region-test.c
new file mode 100644
index 000000000000..020ceac48509
--- /dev/null
+++ b/drivers/fpga/tests/fpga-region-test.c
@@ -0,0 +1,218 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit test for the FPGA Region
+ *
+ * Copyright (C) 2023 Red Hat, Inc.
+ *
+ * Author: Marco Pagani <marpagan@redhat.com>
+ */
+
+#include <kunit/device.h>
+#include <kunit/test.h>
+#include <linux/fpga/fpga-bridge.h>
+#include <linux/fpga/fpga-mgr.h>
+#include <linux/fpga/fpga-region.h>
+#include <linux/module.h>
+#include <linux/types.h>
+
+struct mgr_stats {
+ u32 write_count;
+};
+
+struct bridge_stats {
+ bool enable;
+ u32 cycles_count;
+};
+
+struct test_ctx {
+ struct fpga_manager *mgr;
+ struct device *mgr_dev;
+ struct fpga_bridge *bridge;
+ struct device *bridge_dev;
+ struct fpga_region *region;
+ struct device *region_dev;
+ struct bridge_stats bridge_stats;
+ struct mgr_stats mgr_stats;
+};
+
+/*
+ * Wrappers to avoid cast warnings when passing action functions directly
+ * to kunit_add_action().
+ */
+KUNIT_DEFINE_ACTION_WRAPPER(fpga_image_info_free_wrapper, fpga_image_info_free,
+ struct fpga_image_info *);
+
+KUNIT_DEFINE_ACTION_WRAPPER(fpga_bridge_unregister_wrapper, fpga_bridge_unregister,
+ struct fpga_bridge *);
+
+KUNIT_DEFINE_ACTION_WRAPPER(fpga_region_unregister_wrapper, fpga_region_unregister,
+ struct fpga_region *);
+
+static int op_write(struct fpga_manager *mgr, const char *buf, size_t count)
+{
+ struct mgr_stats *stats = mgr->priv;
+
+ stats->write_count++;
+
+ return 0;
+}
+
+/*
+ * Fake FPGA manager that implements only the write op to count the number
+ * of programming cycles. The internals of the programming sequence are
+ * tested in the Manager suite since they are outside the responsibility
+ * of the Region.
+ */
+static const struct fpga_manager_ops fake_mgr_ops = {
+ .write = op_write,
+};
+
+static int op_enable_set(struct fpga_bridge *bridge, bool enable)
+{
+ struct bridge_stats *stats = bridge->priv;
+
+ if (!stats->enable && enable)
+ stats->cycles_count++;
+
+ stats->enable = enable;
+
+ return 0;
+}
+
+/*
+ * Fake FPGA bridge that implements only the enable_set op to count the
+ * number of activation cycles.
+ */
+static const struct fpga_bridge_ops fake_bridge_ops = {
+ .enable_set = op_enable_set,
+};
+
+static int fake_region_get_bridges(struct fpga_region *region)
+{
+ struct fpga_bridge *bridge = region->priv;
+
+ return fpga_bridge_get_to_list(bridge->dev.parent, region->info, &region->bridge_list);
+}
+
+static int fake_region_match(struct device *dev, const void *data)
+{
+ return dev->parent == data;
+}
+
+static void fpga_region_test_class_find(struct kunit *test)
+{
+ struct test_ctx *ctx = test->priv;
+ struct fpga_region *region;
+
+ region = fpga_region_class_find(NULL, ctx->region_dev, fake_region_match);
+ KUNIT_EXPECT_PTR_EQ(test, region, ctx->region);
+
+ put_device(&region->dev);
+}
+
+/*
+ * FPGA Region programming test. The Region must call get_bridges() to get
+ * and control the bridges, and then use the Manager for the actual
+ * programming.
+ */
+static void fpga_region_test_program_fpga(struct kunit *test)
+{
+ struct test_ctx *ctx = test->priv;
+ struct fpga_image_info *img_info;
+ char img_buf[4];
+ int ret;
+
+ img_info = fpga_image_info_alloc(ctx->mgr_dev);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, img_info);
+
+ ret = kunit_add_action_or_reset(test, fpga_image_info_free_wrapper, img_info);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ img_info->buf = img_buf;
+ img_info->count = sizeof(img_buf);
+
+ ctx->region->info = img_info;
+ ret = fpga_region_program_fpga(ctx->region);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ KUNIT_EXPECT_EQ(test, 1, ctx->mgr_stats.write_count);
+ KUNIT_EXPECT_EQ(test, 1, ctx->bridge_stats.cycles_count);
+
+ fpga_bridges_put(&ctx->region->bridge_list);
+
+ ret = fpga_region_program_fpga(ctx->region);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ KUNIT_EXPECT_EQ(test, 2, ctx->mgr_stats.write_count);
+ KUNIT_EXPECT_EQ(test, 2, ctx->bridge_stats.cycles_count);
+
+ fpga_bridges_put(&ctx->region->bridge_list);
+}
+
+/*
+ * The configuration used in this test suite uses a single bridge to
+ * limit the code under test to a single unit. The functions used by the
+ * Region for getting and controlling bridges are tested (with a list of
+ * multiple bridges) in the Bridge suite.
+ */
+static int fpga_region_test_init(struct kunit *test)
+{
+ struct test_ctx *ctx;
+ struct fpga_region_info region_info = { 0 };
+ int ret;
+
+ ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+
+ ctx->mgr_dev = kunit_device_register(test, "fpga-manager-test-dev");
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx->mgr_dev);
+
+ ctx->mgr = devm_fpga_mgr_register(ctx->mgr_dev, "Fake FPGA Manager",
+ &fake_mgr_ops, &ctx->mgr_stats);
+ KUNIT_ASSERT_FALSE(test, IS_ERR_OR_NULL(ctx->mgr));
+
+ ctx->bridge_dev = kunit_device_register(test, "fpga-bridge-test-dev");
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx->bridge_dev);
+
+ ctx->bridge = fpga_bridge_register(ctx->bridge_dev, "Fake FPGA Bridge",
+ &fake_bridge_ops, &ctx->bridge_stats);
+ KUNIT_ASSERT_FALSE(test, IS_ERR_OR_NULL(ctx->bridge));
+
+ ctx->bridge_stats.enable = true;
+
+ ret = kunit_add_action_or_reset(test, fpga_bridge_unregister_wrapper, ctx->bridge);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ ctx->region_dev = kunit_device_register(test, "fpga-region-test-dev");
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx->region_dev);
+
+ region_info.mgr = ctx->mgr;
+ region_info.priv = ctx->bridge;
+ region_info.get_bridges = fake_region_get_bridges;
+
+ ctx->region = fpga_region_register_full(ctx->region_dev, &region_info);
+ KUNIT_ASSERT_FALSE(test, IS_ERR_OR_NULL(ctx->region));
+
+ ret = kunit_add_action_or_reset(test, fpga_region_unregister_wrapper, ctx->region);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ test->priv = ctx;
+
+ return 0;
+}
+
+static struct kunit_case fpga_region_test_cases[] = {
+ KUNIT_CASE(fpga_region_test_class_find),
+ KUNIT_CASE(fpga_region_test_program_fpga),
+ {}
+};
+
+static struct kunit_suite fpga_region_suite = {
+ .name = "fpga_region",
+ .init = fpga_region_test_init,
+ .test_cases = fpga_region_test_cases,
+};
+
+kunit_test_suite(fpga_region_suite);
+
+MODULE_DESCRIPTION("KUnit test for the FPGA Region");
+MODULE_LICENSE("GPL");
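Outside the test, the same building blocks are normally assembled by a platform driver: register (or look up) a manager, register the bridges, then register the region with fpga_region_register_full() and a get_bridges() callback that fills region->bridge_list. The sketch below shows that shape with hypothetical names and trimmed error handling; it assumes the bridge was registered on the same parent device as the region, which is only one possible arrangement.

#include <linux/err.h>
#include <linux/fpga/fpga-bridge.h>
#include <linux/fpga/fpga-mgr.h>
#include <linux/fpga/fpga-region.h>

/* Illustrative sketch only (not part of the patch). */
static int example_get_bridges(struct fpga_region *region)
{
	/* Assumes the bridge shares the region's parent device. */
	return fpga_bridge_get_to_list(region->dev.parent, region->info,
				       &region->bridge_list);
}

static int example_register_region(struct device *dev,
				   struct fpga_manager *mgr)
{
	struct fpga_region_info info = {
		.mgr = mgr,
		.get_bridges = example_get_bridges,
	};
	struct fpga_region *region;

	region = fpga_region_register_full(dev, &info);
	return PTR_ERR_OR_ZERO(region);
}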
diff --git a/drivers/fpga/ts73xx-fpga.c b/drivers/fpga/ts73xx-fpga.c
index 8e6e9c840d9d..4e1d2a4d3df4 100644
--- a/drivers/fpga/ts73xx-fpga.c
+++ b/drivers/fpga/ts73xx-fpga.c
@@ -103,7 +103,6 @@ static int ts73xx_fpga_probe(struct platform_device *pdev)
struct device *kdev = &pdev->dev;
struct ts73xx_fpga_priv *priv;
struct fpga_manager *mgr;
- struct resource *res;
priv = devm_kzalloc(kdev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -111,8 +110,7 @@ static int ts73xx_fpga_probe(struct platform_device *pdev)
priv->dev = kdev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->io_base = devm_ioremap_resource(kdev, res);
+ priv->io_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->io_base))
return PTR_ERR(priv->io_base);
diff --git a/drivers/fpga/versal-fpga.c b/drivers/fpga/versal-fpga.c
index e1601b3a345b..e6189106c468 100644
--- a/drivers/fpga/versal-fpga.c
+++ b/drivers/fpga/versal-fpga.c
@@ -48,7 +48,7 @@ static int versal_fpga_probe(struct platform_device *pdev)
struct fpga_manager *mgr;
int ret;
- ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44));
if (ret < 0) {
dev_err(dev, "no usable DMA configuration\n");
return ret;
@@ -69,7 +69,7 @@ static struct platform_driver versal_fpga_driver = {
.probe = versal_fpga_probe,
.driver = {
.name = "versal_fpga_manager",
- .of_match_table = of_match_ptr(versal_fpga_of_match),
+ .of_match_table = versal_fpga_of_match,
},
};
module_platform_driver(versal_fpga_driver);
diff --git a/drivers/fpga/xilinx-core.c b/drivers/fpga/xilinx-core.c
new file mode 100644
index 000000000000..39aeacf2e4f1
--- /dev/null
+++ b/drivers/fpga/xilinx-core.c
@@ -0,0 +1,229 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Common parts of the Xilinx Spartan6 and 7 Series FPGA manager drivers.
+ *
+ * Copyright (C) 2017 DENX Software Engineering
+ *
+ * Anatolij Gustschin <agust@denx.de>
+ */
+
+#include "xilinx-core.h"
+
+#include <linux/delay.h>
+#include <linux/fpga/fpga-mgr.h>
+#include <linux/gpio/consumer.h>
+#include <linux/of.h>
+
+static int get_done_gpio(struct fpga_manager *mgr)
+{
+ struct xilinx_fpga_core *core = mgr->priv;
+ int ret;
+
+ ret = gpiod_get_value(core->done);
+ if (ret < 0)
+ dev_err(&mgr->dev, "Error reading DONE (%d)\n", ret);
+
+ return ret;
+}
+
+static enum fpga_mgr_states xilinx_core_state(struct fpga_manager *mgr)
+{
+ if (!get_done_gpio(mgr))
+ return FPGA_MGR_STATE_RESET;
+
+ return FPGA_MGR_STATE_UNKNOWN;
+}
+
+/**
+ * wait_for_init_b - wait for the INIT_B pin to have a given state, or wait
+ * a given delay if the pin is unavailable
+ *
+ * @mgr: The FPGA manager object
+ * @value: Value INIT_B to wait for (1 = asserted = low)
+ * @alt_udelay: Delay to wait if the INIT_B GPIO is not available
+ *
+ * Returns 0 when the INIT_B GPIO reached the given state or -ETIMEDOUT if
+ * too much time passed waiting for that. If no INIT_B GPIO is available
+ * then always return 0.
+ */
+static int wait_for_init_b(struct fpga_manager *mgr, int value,
+ unsigned long alt_udelay)
+{
+ struct xilinx_fpga_core *core = mgr->priv;
+ unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+
+ if (core->init_b) {
+ while (time_before(jiffies, timeout)) {
+ int ret = gpiod_get_value(core->init_b);
+
+ if (ret == value)
+ return 0;
+
+ if (ret < 0) {
+ dev_err(&mgr->dev,
+ "Error reading INIT_B (%d)\n", ret);
+ return ret;
+ }
+
+ usleep_range(100, 400);
+ }
+
+ dev_err(&mgr->dev, "Timeout waiting for INIT_B to %s\n",
+ value ? "assert" : "deassert");
+ return -ETIMEDOUT;
+ }
+
+ udelay(alt_udelay);
+
+ return 0;
+}
+
+static int xilinx_core_write_init(struct fpga_manager *mgr,
+ struct fpga_image_info *info, const char *buf,
+ size_t count)
+{
+ struct xilinx_fpga_core *core = mgr->priv;
+ int err;
+
+ if (info->flags & FPGA_MGR_PARTIAL_RECONFIG) {
+ dev_err(&mgr->dev, "Partial reconfiguration not supported\n");
+ return -EINVAL;
+ }
+
+ gpiod_set_value(core->prog_b, 1);
+
+ err = wait_for_init_b(mgr, 1, 1); /* min is 500 ns */
+ if (err) {
+ gpiod_set_value(core->prog_b, 0);
+ return err;
+ }
+
+ gpiod_set_value(core->prog_b, 0);
+
+ err = wait_for_init_b(mgr, 0, 0);
+ if (err)
+ return err;
+
+ if (get_done_gpio(mgr)) {
+ dev_err(&mgr->dev, "Unexpected DONE pin state...\n");
+ return -EIO;
+ }
+
+ /* program latency */
+ usleep_range(7500, 7600);
+ return 0;
+}
+
+static int xilinx_core_write(struct fpga_manager *mgr, const char *buf,
+ size_t count)
+{
+ struct xilinx_fpga_core *core = mgr->priv;
+
+ return core->write(core, buf, count);
+}
+
+static int xilinx_core_write_complete(struct fpga_manager *mgr,
+ struct fpga_image_info *info)
+{
+ struct xilinx_fpga_core *core = mgr->priv;
+ unsigned long timeout =
+ jiffies + usecs_to_jiffies(info->config_complete_timeout_us);
+ bool expired = false;
+ int done;
+ int ret;
+ const char padding[1] = { 0xff };
+
+ /*
+ * This loop is carefully written such that if the driver is
+ * scheduled out for more than 'timeout', we still check for DONE
+ * before giving up and we apply 8 extra CCLK cycles in all cases.
+ */
+ while (!expired) {
+ expired = time_after(jiffies, timeout);
+
+ done = get_done_gpio(mgr);
+ if (done < 0)
+ return done;
+
+ ret = core->write(core, padding, sizeof(padding));
+ if (ret)
+ return ret;
+
+ if (done)
+ return 0;
+ }
+
+ if (core->init_b) {
+ ret = gpiod_get_value(core->init_b);
+
+ if (ret < 0) {
+ dev_err(&mgr->dev, "Error reading INIT_B (%d)\n", ret);
+ return ret;
+ }
+
+ dev_err(&mgr->dev,
+ ret ? "CRC error or invalid device\n" :
+ "Missing sync word or incomplete bitstream\n");
+ } else {
+ dev_err(&mgr->dev, "Timeout after config data transfer\n");
+ }
+
+ return -ETIMEDOUT;
+}
+
+static inline struct gpio_desc *
+xilinx_core_devm_gpiod_get(struct device *dev, const char *con_id,
+ const char *legacy_con_id, enum gpiod_flags flags)
+{
+ struct gpio_desc *desc;
+
+ desc = devm_gpiod_get(dev, con_id, flags);
+ if (IS_ERR(desc) && PTR_ERR(desc) == -ENOENT &&
+ of_device_is_compatible(dev->of_node, "xlnx,fpga-slave-serial"))
+ desc = devm_gpiod_get(dev, legacy_con_id, flags);
+
+ return desc;
+}
+
+static const struct fpga_manager_ops xilinx_core_ops = {
+ .state = xilinx_core_state,
+ .write_init = xilinx_core_write_init,
+ .write = xilinx_core_write,
+ .write_complete = xilinx_core_write_complete,
+};
+
+int xilinx_core_probe(struct xilinx_fpga_core *core)
+{
+ struct fpga_manager *mgr;
+
+ if (!core || !core->dev || !core->write)
+ return -EINVAL;
+
+ /* PROGRAM_B is active low */
+ core->prog_b = xilinx_core_devm_gpiod_get(core->dev, "prog", "prog_b",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(core->prog_b))
+ return dev_err_probe(core->dev, PTR_ERR(core->prog_b),
+ "Failed to get PROGRAM_B gpio\n");
+
+ core->init_b = xilinx_core_devm_gpiod_get(core->dev, "init", "init-b",
+ GPIOD_IN);
+ if (IS_ERR(core->init_b))
+ return dev_err_probe(core->dev, PTR_ERR(core->init_b),
+ "Failed to get INIT_B gpio\n");
+
+ core->done = devm_gpiod_get(core->dev, "done", GPIOD_IN);
+ if (IS_ERR(core->done))
+ return dev_err_probe(core->dev, PTR_ERR(core->done),
+ "Failed to get DONE gpio\n");
+
+ mgr = devm_fpga_mgr_register(core->dev,
+ "Xilinx Slave Serial FPGA Manager",
+ &xilinx_core_ops, core);
+ return PTR_ERR_OR_ZERO(mgr);
+}
+EXPORT_SYMBOL_GPL(xilinx_core_probe);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Anatolij Gustschin <agust@denx.de>");
+MODULE_DESCRIPTION("Xilinx 7 Series FPGA manager core");
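With this split, a bus front end only has to embed struct xilinx_fpga_core, fill in .dev and .write, and call xilinx_core_probe(); the PROGRAM_B/INIT_B/DONE handshake stays in the core. The SelectMAP and SPI drivers further below follow exactly this pattern. As an illustration only, a front end for a made-up bus would look like this (all example_* names are hypothetical):

#include "xilinx-core.h"

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/slab.h>

/* Illustrative sketch only: a made-up front end, not part of this patch. */
struct example_conf {
	struct xilinx_fpga_core core;
	/* bus-specific handles would live here */
};

static int example_write(struct xilinx_fpga_core *core, const char *buf,
			 size_t count)
{
	struct example_conf *conf = container_of(core, struct example_conf, core);

	/* push 'count' bitstream bytes out over the bus using 'conf' */
	dev_dbg(conf->core.dev, "writing %zu bytes\n", count);
	return 0;
}

static int example_probe(struct device *dev)
{
	struct example_conf *conf;

	conf = devm_kzalloc(dev, sizeof(*conf), GFP_KERNEL);
	if (!conf)
		return -ENOMEM;

	conf->core.dev = dev;
	conf->core.write = example_write;

	return xilinx_core_probe(&conf->core);
}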
diff --git a/drivers/fpga/xilinx-core.h b/drivers/fpga/xilinx-core.h
new file mode 100644
index 000000000000..f02ac67fce7b
--- /dev/null
+++ b/drivers/fpga/xilinx-core.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __XILINX_CORE_H
+#define __XILINX_CORE_H
+
+#include <linux/device.h>
+
+/**
+ * struct xilinx_fpga_core - interface between a hardware-specific driver
+ * and the common Xilinx 7 Series FPGA manager core
+ * @dev: device of the hardware-specific driver
+ * @write: write callback supplied by the hardware-specific driver
+ */
+struct xilinx_fpga_core {
+/* public: */
+ struct device *dev;
+ int (*write)(struct xilinx_fpga_core *core, const char *buf,
+ size_t count);
+/* private: handled by xilinx-core */
+ struct gpio_desc *prog_b;
+ struct gpio_desc *init_b;
+ struct gpio_desc *done;
+};
+
+int xilinx_core_probe(struct xilinx_fpga_core *core);
+
+#endif /* __XILINX_CORE_H */
diff --git a/drivers/fpga/xilinx-pr-decoupler.c b/drivers/fpga/xilinx-pr-decoupler.c
index 2d9c491f7be9..822751fad18a 100644
--- a/drivers/fpga/xilinx-pr-decoupler.c
+++ b/drivers/fpga/xilinx-pr-decoupler.c
@@ -10,8 +10,10 @@
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/kernel.h>
-#include <linux/of_device.h>
#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/fpga/fpga-bridge.h>
#define CTRL_CMD_DECOUPLE BIT(0)
@@ -69,7 +71,7 @@ static int xlnx_pr_decoupler_enable_show(struct fpga_bridge *bridge)
if (err)
return err;
- status = readl(priv->io_base);
+ status = xlnx_pr_decouple_read(priv, CTRL_OFFSET);
clk_disable(priv->clk);
@@ -81,7 +83,6 @@ static const struct fpga_bridge_ops xlnx_pr_decoupler_br_ops = {
.enable_show = xlnx_pr_decoupler_enable_show,
};
-#ifdef CONFIG_OF
static const struct xlnx_config_data decoupler_config = {
.name = "Xilinx PR Decoupler",
};
@@ -100,30 +101,20 @@ static const struct of_device_id xlnx_pr_decoupler_of_match[] = {
{},
};
MODULE_DEVICE_TABLE(of, xlnx_pr_decoupler_of_match);
-#endif
static int xlnx_pr_decoupler_probe(struct platform_device *pdev)
{
- struct device_node *np = pdev->dev.of_node;
struct xlnx_pr_decoupler_data *priv;
struct fpga_bridge *br;
int err;
- struct resource *res;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- if (np) {
- const struct of_device_id *match;
+ priv->ipconfig = device_get_match_data(&pdev->dev);
- match = of_match_node(xlnx_pr_decoupler_of_match, np);
- if (match && match->data)
- priv->ipconfig = match->data;
- }
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->io_base = devm_ioremap_resource(&pdev->dev, res);
+ priv->io_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->io_base))
return PTR_ERR(priv->io_base);
@@ -159,7 +150,7 @@ err_clk:
return err;
}
-static int xlnx_pr_decoupler_remove(struct platform_device *pdev)
+static void xlnx_pr_decoupler_remove(struct platform_device *pdev)
{
struct fpga_bridge *bridge = platform_get_drvdata(pdev);
struct xlnx_pr_decoupler_data *p = bridge->priv;
@@ -167,8 +158,6 @@ static int xlnx_pr_decoupler_remove(struct platform_device *pdev)
fpga_bridge_unregister(bridge);
clk_unprepare(p->clk);
-
- return 0;
}
static struct platform_driver xlnx_pr_decoupler_driver = {
@@ -176,7 +165,7 @@ static struct platform_driver xlnx_pr_decoupler_driver = {
.remove = xlnx_pr_decoupler_remove,
.driver = {
.name = "xlnx_pr_decoupler",
- .of_match_table = of_match_ptr(xlnx_pr_decoupler_of_match),
+ .of_match_table = xlnx_pr_decoupler_of_match,
},
};
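Two generic helpers carry this conversion: device_get_match_data() returns the .data pointer of the matching of_device_id entry (or NULL when there is none), and devm_platform_ioremap_resource() folds the platform_get_resource()/devm_ioremap_resource() pair into one call. In isolation the resulting probe pattern looks like the sketch below; the example_* names are hypothetical and not part of the patch.

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/property.h>

struct example_config {
	const char *name;
};

/* Illustrative sketch only, not part of the patch. */
static int example_probe(struct platform_device *pdev)
{
	const struct example_config *cfg;
	void __iomem *base;

	/* .data of the matched of_device_id entry, or NULL */
	cfg = device_get_match_data(&pdev->dev);

	/* platform_get_resource() + devm_ioremap_resource() in one step */
	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	dev_info(&pdev->dev, "variant: %s\n", cfg ? cfg->name : "unknown");
	return 0;
}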
diff --git a/drivers/fpga/xilinx-selectmap.c b/drivers/fpga/xilinx-selectmap.c
new file mode 100644
index 000000000000..2cd87e7e913f
--- /dev/null
+++ b/drivers/fpga/xilinx-selectmap.c
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Xilinx Spartan6 and 7 Series SelectMAP interface driver
+ *
+ * (C) 2024 Charles Perry <charles.perry@savoirfairelinux.com>
+ *
+ * Manage Xilinx FPGA firmware loaded over the SelectMAP configuration
+ * interface.
+ */
+
+#include "xilinx-core.h"
+
+#include <linux/gpio/consumer.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+struct xilinx_selectmap_conf {
+ struct xilinx_fpga_core core;
+ void __iomem *base;
+};
+
+#define to_xilinx_selectmap_conf(obj) \
+ container_of(obj, struct xilinx_selectmap_conf, core)
+
+static int xilinx_selectmap_write(struct xilinx_fpga_core *core,
+ const char *buf, size_t count)
+{
+ struct xilinx_selectmap_conf *conf = to_xilinx_selectmap_conf(core);
+ size_t i;
+
+ for (i = 0; i < count; ++i)
+ writeb(buf[i], conf->base);
+
+ return 0;
+}
+
+static int xilinx_selectmap_probe(struct platform_device *pdev)
+{
+ struct xilinx_selectmap_conf *conf;
+ struct gpio_desc *gpio;
+ void __iomem *base;
+
+ conf = devm_kzalloc(&pdev->dev, sizeof(*conf), GFP_KERNEL);
+ if (!conf)
+ return -ENOMEM;
+
+ conf->core.dev = &pdev->dev;
+ conf->core.write = xilinx_selectmap_write;
+
+ base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
+ if (IS_ERR(base))
+ return dev_err_probe(&pdev->dev, PTR_ERR(base),
+ "ioremap error\n");
+ conf->base = base;
+
+ /* CSI_B is active low */
+ gpio = devm_gpiod_get_optional(&pdev->dev, "csi", GPIOD_OUT_HIGH);
+ if (IS_ERR(gpio))
+ return dev_err_probe(&pdev->dev, PTR_ERR(gpio),
+ "Failed to get CSI_B gpio\n");
+
+ /* RDWR_B is active low */
+ gpio = devm_gpiod_get_optional(&pdev->dev, "rdwr", GPIOD_OUT_HIGH);
+ if (IS_ERR(gpio))
+ return dev_err_probe(&pdev->dev, PTR_ERR(gpio),
+ "Failed to get RDWR_B gpio\n");
+
+ return xilinx_core_probe(&conf->core);
+}
+
+static const struct of_device_id xlnx_selectmap_of_match[] = {
+ { .compatible = "xlnx,fpga-xc7s-selectmap", }, // Spartan-7
+ { .compatible = "xlnx,fpga-xc7a-selectmap", }, // Artix-7
+ { .compatible = "xlnx,fpga-xc7k-selectmap", }, // Kintex-7
+ { .compatible = "xlnx,fpga-xc7v-selectmap", }, // Virtex-7
+ {},
+};
+MODULE_DEVICE_TABLE(of, xlnx_selectmap_of_match);
+
+static struct platform_driver xilinx_selectmap_driver = {
+ .driver = {
+ .name = "xilinx-selectmap",
+ .of_match_table = xlnx_selectmap_of_match,
+ },
+ .probe = xilinx_selectmap_probe,
+};
+
+module_platform_driver(xilinx_selectmap_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Charles Perry <charles.perry@savoirfairelinux.com>");
+MODULE_DESCRIPTION("Load Xilinx FPGA firmware over SelectMap");
diff --git a/drivers/fpga/xilinx-spi.c b/drivers/fpga/xilinx-spi.c
index e1a227e7ff2a..e294e3a6cc03 100644
--- a/drivers/fpga/xilinx-spi.c
+++ b/drivers/fpga/xilinx-spi.c
@@ -10,127 +10,17 @@
* the slave serial configuration interface.
*/
-#include <linux/delay.h>
-#include <linux/device.h>
-#include <linux/fpga/fpga-mgr.h>
-#include <linux/gpio/consumer.h>
+#include "xilinx-core.h"
+
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/of.h>
#include <linux/spi/spi.h>
-#include <linux/sizes.h>
-
-struct xilinx_spi_conf {
- struct spi_device *spi;
- struct gpio_desc *prog_b;
- struct gpio_desc *init_b;
- struct gpio_desc *done;
-};
-
-static int get_done_gpio(struct fpga_manager *mgr)
-{
- struct xilinx_spi_conf *conf = mgr->priv;
- int ret;
-
- ret = gpiod_get_value(conf->done);
-
- if (ret < 0)
- dev_err(&mgr->dev, "Error reading DONE (%d)\n", ret);
-
- return ret;
-}
-
-static enum fpga_mgr_states xilinx_spi_state(struct fpga_manager *mgr)
-{
- if (!get_done_gpio(mgr))
- return FPGA_MGR_STATE_RESET;
-
- return FPGA_MGR_STATE_UNKNOWN;
-}
-
-/**
- * wait_for_init_b - wait for the INIT_B pin to have a given state, or wait
- * a given delay if the pin is unavailable
- *
- * @mgr: The FPGA manager object
- * @value: Value INIT_B to wait for (1 = asserted = low)
- * @alt_udelay: Delay to wait if the INIT_B GPIO is not available
- *
- * Returns 0 when the INIT_B GPIO reached the given state or -ETIMEDOUT if
- * too much time passed waiting for that. If no INIT_B GPIO is available
- * then always return 0.
- */
-static int wait_for_init_b(struct fpga_manager *mgr, int value,
- unsigned long alt_udelay)
-{
- struct xilinx_spi_conf *conf = mgr->priv;
- unsigned long timeout = jiffies + msecs_to_jiffies(1000);
-
- if (conf->init_b) {
- while (time_before(jiffies, timeout)) {
- int ret = gpiod_get_value(conf->init_b);
-
- if (ret == value)
- return 0;
-
- if (ret < 0) {
- dev_err(&mgr->dev, "Error reading INIT_B (%d)\n", ret);
- return ret;
- }
-
- usleep_range(100, 400);
- }
-
- dev_err(&mgr->dev, "Timeout waiting for INIT_B to %s\n",
- value ? "assert" : "deassert");
- return -ETIMEDOUT;
- }
-
- udelay(alt_udelay);
-
- return 0;
-}
-
-static int xilinx_spi_write_init(struct fpga_manager *mgr,
- struct fpga_image_info *info,
- const char *buf, size_t count)
-{
- struct xilinx_spi_conf *conf = mgr->priv;
- int err;
-
- if (info->flags & FPGA_MGR_PARTIAL_RECONFIG) {
- dev_err(&mgr->dev, "Partial reconfiguration not supported\n");
- return -EINVAL;
- }
-
- gpiod_set_value(conf->prog_b, 1);
-
- err = wait_for_init_b(mgr, 1, 1); /* min is 500 ns */
- if (err) {
- gpiod_set_value(conf->prog_b, 0);
- return err;
- }
- gpiod_set_value(conf->prog_b, 0);
-
- err = wait_for_init_b(mgr, 0, 0);
- if (err)
- return err;
-
- if (get_done_gpio(mgr)) {
- dev_err(&mgr->dev, "Unexpected DONE pin state...\n");
- return -EIO;
- }
-
- /* program latency */
- usleep_range(7500, 7600);
- return 0;
-}
-
-static int xilinx_spi_write(struct fpga_manager *mgr, const char *buf,
+static int xilinx_spi_write(struct xilinx_fpga_core *core, const char *buf,
size_t count)
{
- struct xilinx_spi_conf *conf = mgr->priv;
+ struct spi_device *spi = to_spi_device(core->dev);
const char *fw_data = buf;
const char *fw_data_end = fw_data + count;
@@ -141,9 +31,9 @@ static int xilinx_spi_write(struct fpga_manager *mgr, const char *buf,
remaining = fw_data_end - fw_data;
stride = min_t(size_t, remaining, SZ_4K);
- ret = spi_write(conf->spi, fw_data, stride);
+ ret = spi_write(spi, fw_data, stride);
if (ret) {
- dev_err(&mgr->dev, "SPI error in firmware write: %d\n",
+ dev_err(core->dev, "SPI error in firmware write: %d\n",
ret);
return ret;
}
@@ -153,109 +43,31 @@ static int xilinx_spi_write(struct fpga_manager *mgr, const char *buf,
return 0;
}
-static int xilinx_spi_apply_cclk_cycles(struct xilinx_spi_conf *conf)
-{
- struct spi_device *spi = conf->spi;
- const u8 din_data[1] = { 0xff };
- int ret;
-
- ret = spi_write(conf->spi, din_data, sizeof(din_data));
- if (ret)
- dev_err(&spi->dev, "applying CCLK cycles failed: %d\n", ret);
-
- return ret;
-}
-
-static int xilinx_spi_write_complete(struct fpga_manager *mgr,
- struct fpga_image_info *info)
-{
- struct xilinx_spi_conf *conf = mgr->priv;
- unsigned long timeout = jiffies + usecs_to_jiffies(info->config_complete_timeout_us);
- bool expired = false;
- int done;
- int ret;
-
- /*
- * This loop is carefully written such that if the driver is
- * scheduled out for more than 'timeout', we still check for DONE
- * before giving up and we apply 8 extra CCLK cycles in all cases.
- */
- while (!expired) {
- expired = time_after(jiffies, timeout);
-
- done = get_done_gpio(mgr);
- if (done < 0)
- return done;
-
- ret = xilinx_spi_apply_cclk_cycles(conf);
- if (ret)
- return ret;
-
- if (done)
- return 0;
- }
-
- if (conf->init_b) {
- ret = gpiod_get_value(conf->init_b);
-
- if (ret < 0) {
- dev_err(&mgr->dev, "Error reading INIT_B (%d)\n", ret);
- return ret;
- }
-
- dev_err(&mgr->dev,
- ret ? "CRC error or invalid device\n"
- : "Missing sync word or incomplete bitstream\n");
- } else {
- dev_err(&mgr->dev, "Timeout after config data transfer\n");
- }
-
- return -ETIMEDOUT;
-}
-
-static const struct fpga_manager_ops xilinx_spi_ops = {
- .state = xilinx_spi_state,
- .write_init = xilinx_spi_write_init,
- .write = xilinx_spi_write,
- .write_complete = xilinx_spi_write_complete,
-};
-
static int xilinx_spi_probe(struct spi_device *spi)
{
- struct xilinx_spi_conf *conf;
- struct fpga_manager *mgr;
+ struct xilinx_fpga_core *core;
- conf = devm_kzalloc(&spi->dev, sizeof(*conf), GFP_KERNEL);
- if (!conf)
+ core = devm_kzalloc(&spi->dev, sizeof(*core), GFP_KERNEL);
+ if (!core)
return -ENOMEM;
- conf->spi = spi;
-
- /* PROGRAM_B is active low */
- conf->prog_b = devm_gpiod_get(&spi->dev, "prog_b", GPIOD_OUT_LOW);
- if (IS_ERR(conf->prog_b))
- return dev_err_probe(&spi->dev, PTR_ERR(conf->prog_b),
- "Failed to get PROGRAM_B gpio\n");
-
- conf->init_b = devm_gpiod_get_optional(&spi->dev, "init-b", GPIOD_IN);
- if (IS_ERR(conf->init_b))
- return dev_err_probe(&spi->dev, PTR_ERR(conf->init_b),
- "Failed to get INIT_B gpio\n");
+ core->dev = &spi->dev;
+ core->write = xilinx_spi_write;
- conf->done = devm_gpiod_get(&spi->dev, "done", GPIOD_IN);
- if (IS_ERR(conf->done))
- return dev_err_probe(&spi->dev, PTR_ERR(conf->done),
- "Failed to get DONE gpio\n");
-
- mgr = devm_fpga_mgr_register(&spi->dev,
- "Xilinx Slave Serial FPGA Manager",
- &xilinx_spi_ops, conf);
- return PTR_ERR_OR_ZERO(mgr);
+ return xilinx_core_probe(core);
}
+static const struct spi_device_id xilinx_spi_ids[] = {
+ { "fpga-slave-serial" },
+ { },
+};
+MODULE_DEVICE_TABLE(spi, xilinx_spi_ids);
+
#ifdef CONFIG_OF
static const struct of_device_id xlnx_spi_of_match[] = {
- { .compatible = "xlnx,fpga-slave-serial", },
+ {
+ .compatible = "xlnx,fpga-slave-serial",
+ },
{}
};
MODULE_DEVICE_TABLE(of, xlnx_spi_of_match);
@@ -267,6 +79,7 @@ static struct spi_driver xilinx_slave_spi_driver = {
.of_match_table = of_match_ptr(xlnx_spi_of_match),
},
.probe = xilinx_spi_probe,
+ .id_table = xilinx_spi_ids,
};
module_spi_driver(xilinx_slave_spi_driver)
diff --git a/drivers/fpga/zynq-fpga.c b/drivers/fpga/zynq-fpga.c
index ae0da361e6c6..b7629a0e4813 100644
--- a/drivers/fpga/zynq-fpga.c
+++ b/drivers/fpga/zynq-fpga.c
@@ -387,7 +387,7 @@ static int zynq_fpga_ops_write(struct fpga_manager *mgr, struct sg_table *sgt)
const char *why;
int err;
u32 intr_status;
- unsigned long timeout;
+ unsigned long time_left;
unsigned long flags;
struct scatterlist *sg;
int i;
@@ -405,12 +405,12 @@ static int zynq_fpga_ops_write(struct fpga_manager *mgr, struct sg_table *sgt)
}
}
- priv->dma_nelms =
- dma_map_sg(mgr->dev.parent, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
- if (priv->dma_nelms == 0) {
+ err = dma_map_sgtable(mgr->dev.parent, sgt, DMA_TO_DEVICE, 0);
+ if (err) {
dev_err(&mgr->dev, "Unable to DMA map (TO_DEVICE)\n");
- return -ENOMEM;
+ return err;
}
+ priv->dma_nelms = sgt->nents;
/* enable clock */
err = clk_enable(priv->clk);
@@ -427,8 +427,8 @@ static int zynq_fpga_ops_write(struct fpga_manager *mgr, struct sg_table *sgt)
zynq_step_dma(priv);
spin_unlock_irqrestore(&priv->dma_lock, flags);
- timeout = wait_for_completion_timeout(&priv->dma_done,
- msecs_to_jiffies(DMA_TIMEOUT_MS));
+ time_left = wait_for_completion_timeout(&priv->dma_done,
+ msecs_to_jiffies(DMA_TIMEOUT_MS));
spin_lock_irqsave(&priv->dma_lock, flags);
zynq_fpga_set_irq(priv, 0);
@@ -452,7 +452,7 @@ static int zynq_fpga_ops_write(struct fpga_manager *mgr, struct sg_table *sgt)
if (priv->cur_sg ||
!((intr_status & IXR_D_P_DONE_MASK) == IXR_D_P_DONE_MASK)) {
- if (timeout == 0)
+ if (time_left == 0)
why = "DMA timed out";
else
why = "DMA did not complete";
@@ -478,7 +478,7 @@ out_clk:
clk_disable(priv->clk);
out_free:
- dma_unmap_sg(mgr->dev.parent, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
+ dma_unmap_sgtable(mgr->dev.parent, sgt, DMA_TO_DEVICE, 0);
return err;
}
@@ -493,15 +493,15 @@ static int zynq_fpga_ops_write_complete(struct fpga_manager *mgr,
if (err)
return err;
- /* Release 'PR' control back to the ICAP */
- zynq_fpga_write(priv, CTRL_OFFSET,
- zynq_fpga_read(priv, CTRL_OFFSET) & ~CTRL_PCAP_PR_MASK);
-
err = zynq_fpga_poll_timeout(priv, INT_STS_OFFSET, intr_status,
intr_status & IXR_PCFG_DONE_MASK,
INIT_POLL_DELAY,
INIT_POLL_TIMEOUT);
+ /* Release 'PR' control back to the ICAP */
+ zynq_fpga_write(priv, CTRL_OFFSET,
+ zynq_fpga_read(priv, CTRL_OFFSET) & ~CTRL_PCAP_PR_MASK);
+
clk_disable(priv->clk);
if (err)
@@ -555,7 +555,6 @@ static int zynq_fpga_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct zynq_fpga_priv *priv;
struct fpga_manager *mgr;
- struct resource *res;
int err;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -563,8 +562,7 @@ static int zynq_fpga_probe(struct platform_device *pdev)
return -ENOMEM;
spin_lock_init(&priv->dma_lock);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->io_base = devm_ioremap_resource(dev, res);
+ priv->io_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->io_base))
return PTR_ERR(priv->io_base);
@@ -620,7 +618,7 @@ static int zynq_fpga_probe(struct platform_device *pdev)
return 0;
}
-static int zynq_fpga_remove(struct platform_device *pdev)
+static void zynq_fpga_remove(struct platform_device *pdev)
{
struct zynq_fpga_priv *priv;
struct fpga_manager *mgr;
@@ -631,8 +629,6 @@ static int zynq_fpga_remove(struct platform_device *pdev)
fpga_mgr_unregister(mgr);
clk_unprepare(priv->clk);
-
- return 0;
}
#ifdef CONFIG_OF
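The zynq change above is a straight migration from the sg-list DMA calls to the sg_table helpers: dma_map_sgtable() returns an errno instead of zero-on-failure, records the mapped entry count in sgt->nents, and dma_unmap_sgtable() mirrors it. A self-contained sketch of the pattern follows; the example_* name is hypothetical and not part of the patch.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Illustrative sketch only, not part of the patch. */
static int example_dma_to_device(struct device *dev, struct sg_table *sgt)
{
	struct scatterlist *sg;
	int ret, i;

	ret = dma_map_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
	if (ret)
		return ret;	/* already an errno, no translation needed */

	/* sgt->nents now holds the number of mapped (DMA) entries */
	for_each_sgtable_dma_sg(sgt, sg, i) {
		dma_addr_t addr = sg_dma_address(sg);
		unsigned int len = sg_dma_len(sg);

		/* program addr/len into the DMA engine here */
		dev_dbg(dev, "segment %d: %pad + %u\n", i, &addr, len);
	}

	dma_unmap_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
	return 0;
}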
diff --git a/drivers/fpga/zynqmp-fpga.c b/drivers/fpga/zynqmp-fpga.c
index c60f20949c47..f3434e2c487b 100644
--- a/drivers/fpga/zynqmp-fpga.c
+++ b/drivers/fpga/zynqmp-fpga.c
@@ -77,6 +77,26 @@ static enum fpga_mgr_states zynqmp_fpga_ops_state(struct fpga_manager *mgr)
return FPGA_MGR_STATE_UNKNOWN;
}
+static ssize_t status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u32 status;
+ int ret;
+
+ ret = zynqmp_pm_fpga_get_config_status(&status);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "0x%x\n", status);
+}
+static DEVICE_ATTR_RO(status);
+
+static struct attribute *zynqmp_fpga_attrs[] = {
+ &dev_attr_status.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(zynqmp_fpga);
+
static const struct fpga_manager_ops zynqmp_fpga_ops = {
.state = zynqmp_fpga_ops_state,
.write_init = zynqmp_fpga_ops_write_init,
@@ -113,6 +133,7 @@ static struct platform_driver zynqmp_fpga_driver = {
.driver = {
.name = "zynqmp_fpga_manager",
.of_match_table = of_match_ptr(zynqmp_fpga_of_match),
+ .dev_groups = zynqmp_fpga_groups,
},
};