Diffstat (limited to 'drivers/fpga')
56 files changed, 4926 insertions, 1916 deletions
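Most of the driver-side churn below follows a few recurring conversions: the two-step devm_fpga_mgr_create() + fpga_mgr_register() and devm_fpga_bridge_create() + fpga_bridge_register() flows become single registration calls that return an ERR_PTR()-encoded handle, open-coded platform_get_resource() + devm_ioremap_resource() pairs become devm_platform_ioremap_resource(), and of_match_device() lookups become device_get_match_data() or spi_get_device_match_data(). A minimal sketch of the resulting probe shape, using a hypothetical "acme" platform driver rather than any driver touched in this diff:

/*
 * Illustrative only: shows the converted probe pattern, not code from this
 * diff. The "acme" names are made up; error handling is kept to the minimum.
 */
#include <linux/err.h>
#include <linux/fpga/fpga-mgr.h>
#include <linux/module.h>
#include <linux/platform_device.h>

struct acme_priv {
	void __iomem *base;
};

static const struct fpga_manager_ops acme_mgr_ops = {
	/* .write_init/.write/.write_complete as the hardware requires */
};

static int acme_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct fpga_manager *mgr;
	struct acme_priv *priv;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* replaces platform_get_resource() + devm_ioremap_resource() */
	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	/*
	 * Allocation and registration in one call, unwound automatically on
	 * driver detach; failure is reported via ERR_PTR(), not NULL.
	 */
	mgr = devm_fpga_mgr_register(dev, "acme FPGA manager",
				     &acme_mgr_ops, priv);
	return PTR_ERR_OR_ZERO(mgr);
}

The unmanaged fpga_mgr_register() and fpga_bridge_register() variants used by altera-cvp and the bridge drivers below follow the same ERR_PTR() convention, with the matching unregister call kept in remove().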
diff --git a/drivers/fpga/Kconfig b/drivers/fpga/Kconfig
index 8cd454ee20c0..37b35f58f0df 100644
--- a/drivers/fpga/Kconfig
+++ b/drivers/fpga/Kconfig
@@ -64,9 +64,21 @@ config FPGA_MGR_STRATIX10_SOC
 	help
 	  FPGA manager driver support for the Intel Stratix10 SoC.
 
+config FPGA_MGR_XILINX_CORE
+	tristate
+
+config FPGA_MGR_XILINX_SELECTMAP
+	tristate "Xilinx Configuration over SelectMAP"
+	depends on HAS_IOMEM
+	select FPGA_MGR_XILINX_CORE
+	help
+	  FPGA manager driver support for Xilinx FPGA configuration
+	  over SelectMAP interface.
+
 config FPGA_MGR_XILINX_SPI
 	tristate "Xilinx Configuration over Slave Serial (SPI)"
 	depends on SPI
+	select FPGA_MGR_XILINX_CORE
 	help
 	  FPGA manager driver support for Xilinx FPGA configuration
 	  over slave serial interface.
@@ -119,7 +131,7 @@ config XILINX_PR_DECOUPLER
 	depends on HAS_IOMEM
 	help
 	  Say Y to enable drivers for Xilinx LogiCORE PR Decoupler
-	  or Xilinx Dynamic Function eXchnage AIX Shutdown Manager.
+	  or Xilinx Dynamic Function eXchange AIX Shutdown Manager.
 	  The PR Decoupler exists in the FPGA fabric to isolate one
 	  region of the FPGA from the busses while that region is
 	  being reprogrammed during partial reconfig.
@@ -234,4 +246,48 @@ config FPGA_MGR_ZYNQMP_FPGA
 	  to configure the programmable logic(PL) through PS
 	  on ZynqMP SoC.
 
+config FPGA_MGR_VERSAL_FPGA
+	tristate "Xilinx Versal FPGA"
+	depends on ARCH_ZYNQMP || COMPILE_TEST
+	help
+	  Select this option to enable FPGA manager driver support for
+	  Xilinx Versal SoC. This driver uses the firmware interface to
+	  configure the programmable logic(PL).
+
+	  To compile this as a module, choose M here.
+
+config FPGA_M10_BMC_SEC_UPDATE
+	tristate "Intel MAX10 BMC Secure Update driver"
+	depends on MFD_INTEL_M10_BMC_CORE
+	select FW_LOADER
+	select FW_UPLOAD
+	help
+	  Secure update support for the Intel MAX10 board management
+	  controller.
+
+	  This is a subdriver of the Intel MAX10 board management controller
+	  (BMC) and provides support for secure updates for the BMC image,
+	  the FPGA image, the Root Entry Hashes, etc.
+
+config FPGA_MGR_MICROCHIP_SPI
+	tristate "Microchip Polarfire SPI FPGA manager"
+	depends on SPI
+	help
+	  FPGA manager driver support for Microchip Polarfire FPGAs
+	  programming over slave SPI interface with .dat formatted
+	  bitstream image.
+
+config FPGA_MGR_LATTICE_SYSCONFIG
+	tristate
+
+config FPGA_MGR_LATTICE_SYSCONFIG_SPI
+	tristate "Lattice sysCONFIG SPI FPGA manager"
+	depends on SPI
+	select FPGA_MGR_LATTICE_SYSCONFIG
+	help
+	  FPGA manager driver support for Lattice FPGAs programming over slave
+	  SPI sysCONFIG interface.
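The bulk of the dfl-afu hunks further down replace struct dfl_feature_platform_data (looked up with dev_get_platdata()) with struct dfl_feature_dev_data and its to_dfl_feature_dev_data() accessor; feature lookups and the serializing mutex move along with it. A sketch of the resulting sysfs-handler shape, mirroring the converted show functions in dfl-afu-main.c (example_show is a hypothetical attribute, and the dfl.h internals are assumed from the hunks below):

static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	/* was: struct dfl_feature_platform_data *pdata = dev_get_platdata(dev); */
	struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
	void __iomem *base;
	u64 v;

	/* feature lookups now take the feature dev data, not the device */
	base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);

	/* the serializing mutex moved from pdata->lock to fdata->lock */
	mutex_lock(&fdata->lock);
	v = readq(base + PORT_HDR_STS);
	mutex_unlock(&fdata->lock);

	return sprintf(buf, "0x%llx\n", (unsigned long long)v);
}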
+ +source "drivers/fpga/tests/Kconfig" + endif # FPGA diff --git a/drivers/fpga/Makefile b/drivers/fpga/Makefile index 18dc9885883a..aeb89bb13517 100644 --- a/drivers/fpga/Makefile +++ b/drivers/fpga/Makefile @@ -15,11 +15,20 @@ obj-$(CONFIG_FPGA_MGR_SOCFPGA) += socfpga.o obj-$(CONFIG_FPGA_MGR_SOCFPGA_A10) += socfpga-a10.o obj-$(CONFIG_FPGA_MGR_STRATIX10_SOC) += stratix10-soc.o obj-$(CONFIG_FPGA_MGR_TS73XX) += ts73xx-fpga.o +obj-$(CONFIG_FPGA_MGR_XILINX_CORE) += xilinx-core.o +obj-$(CONFIG_FPGA_MGR_XILINX_SELECTMAP) += xilinx-selectmap.o obj-$(CONFIG_FPGA_MGR_XILINX_SPI) += xilinx-spi.o obj-$(CONFIG_FPGA_MGR_ZYNQ_FPGA) += zynq-fpga.o obj-$(CONFIG_FPGA_MGR_ZYNQMP_FPGA) += zynqmp-fpga.o -obj-$(CONFIG_ALTERA_PR_IP_CORE) += altera-pr-ip-core.o -obj-$(CONFIG_ALTERA_PR_IP_CORE_PLAT) += altera-pr-ip-core-plat.o +obj-$(CONFIG_FPGA_MGR_VERSAL_FPGA) += versal-fpga.o +obj-$(CONFIG_FPGA_MGR_MICROCHIP_SPI) += microchip-spi.o +obj-$(CONFIG_FPGA_MGR_LATTICE_SYSCONFIG) += lattice-sysconfig.o +obj-$(CONFIG_FPGA_MGR_LATTICE_SYSCONFIG_SPI) += lattice-sysconfig-spi.o +obj-$(CONFIG_ALTERA_PR_IP_CORE) += altera-pr-ip-core.o +obj-$(CONFIG_ALTERA_PR_IP_CORE_PLAT) += altera-pr-ip-core-plat.o + +# FPGA Secure Update Drivers +obj-$(CONFIG_FPGA_M10_BMC_SEC_UPDATE) += intel-m10-bmc-sec-update.o # FPGA Bridge Drivers obj-$(CONFIG_FPGA_BRIDGE) += fpga-bridge.o @@ -48,3 +57,6 @@ obj-$(CONFIG_FPGA_DFL_NIOS_INTEL_PAC_N3000) += dfl-n3000-nios.o # Drivers for FPGAs which implement DFL obj-$(CONFIG_FPGA_DFL_PCI) += dfl-pci.o + +# KUnit tests +obj-$(CONFIG_FPGA_KUNIT_TESTS) += tests/ diff --git a/drivers/fpga/altera-cvp.c b/drivers/fpga/altera-cvp.c index 4e0edb60bfba..44badfd11e1b 100644 --- a/drivers/fpga/altera-cvp.c +++ b/drivers/fpga/altera-cvp.c @@ -22,9 +22,6 @@ #define TIMEOUT_US 2000 /* CVP STATUS timeout for USERMODE polling */ /* Vendor Specific Extended Capability Registers */ -#define VSE_PCIE_EXT_CAP_ID 0x0 -#define VSE_PCIE_EXT_CAP_ID_VAL 0x000b /* 16bit */ - #define VSE_CVP_STATUS 0x1c /* 32bit */ #define VSE_CVP_STATUS_CFG_RDY BIT(18) /* CVP_CONFIG_READY */ #define VSE_CVP_STATUS_CFG_ERR BIT(19) /* CVP_CONFIG_ERROR */ @@ -52,7 +49,7 @@ /* V2 Defines */ #define VSE_CVP_TX_CREDITS 0x49 /* 8bit */ -#define V2_CREDIT_TIMEOUT_US 20000 +#define V2_CREDIT_TIMEOUT_US 40000 #define V2_CHECK_CREDIT_US 10 #define V2_POLL_TIMEOUT_US 1000000 #define V2_USER_TIMEOUT_US 500000 @@ -72,7 +69,6 @@ static bool altera_cvp_chkcfg; struct cvp_priv; struct altera_cvp_conf { - struct fpga_manager *mgr; struct pci_dev *pci_dev; void __iomem *map; void (*write_data)(struct altera_cvp_conf *conf, @@ -346,7 +342,7 @@ static int altera_cvp_write_init(struct fpga_manager *mgr, } if (val & VSE_CVP_STATUS_CFG_RDY) { - dev_warn(&mgr->dev, "CvP already started, teardown first\n"); + dev_warn(&mgr->dev, "CvP already started, tear down first\n"); ret = altera_cvp_teardown(mgr, info); if (ret) return ret; @@ -578,25 +574,18 @@ static int altera_cvp_probe(struct pci_dev *pdev, { struct altera_cvp_conf *conf; struct fpga_manager *mgr; - int ret, offset; - u16 cmd, val; + u16 cmd, offset; u32 regval; - - /* Discover the Vendor Specific Offset for this device */ - offset = pci_find_next_ext_capability(pdev, 0, PCI_EXT_CAP_ID_VNDR); - if (!offset) { - dev_err(&pdev->dev, "No Vendor Specific Offset.\n"); - return -ENODEV; - } + int ret; /* * First check if this is the expected FPGA device. PCI config * space access works without enabling the PCI device, memory * space access is enabled further down. 
*/ - pci_read_config_word(pdev, offset + VSE_PCIE_EXT_CAP_ID, &val); - if (val != VSE_PCIE_EXT_CAP_ID_VAL) { - dev_err(&pdev->dev, "Wrong EXT_CAP_ID value 0x%x\n", val); + offset = pci_find_vsec_capability(pdev, PCI_VENDOR_ID_ALTERA, 0x1172); + if (!offset) { + dev_err(&pdev->dev, "Wrong VSEC ID value\n"); return -ENODEV; } @@ -652,19 +641,15 @@ static int altera_cvp_probe(struct pci_dev *pdev, snprintf(conf->mgr_name, sizeof(conf->mgr_name), "%s @%s", ALTERA_CVP_MGR_NAME, pci_name(pdev)); - mgr = devm_fpga_mgr_create(&pdev->dev, conf->mgr_name, - &altera_cvp_ops, conf); - if (!mgr) { - ret = -ENOMEM; + mgr = fpga_mgr_register(&pdev->dev, conf->mgr_name, + &altera_cvp_ops, conf); + if (IS_ERR(mgr)) { + ret = PTR_ERR(mgr); goto err_unmap; } pci_set_drvdata(pdev, mgr); - ret = fpga_mgr_register(mgr); - if (ret) - goto err_unmap; - return 0; err_unmap: diff --git a/drivers/fpga/altera-fpga2sdram.c b/drivers/fpga/altera-fpga2sdram.c index a78e49c63c64..e41492988dd6 100644 --- a/drivers/fpga/altera-fpga2sdram.c +++ b/drivers/fpga/altera-fpga2sdram.c @@ -27,7 +27,7 @@ #include <linux/kernel.h> #include <linux/mfd/syscon.h> #include <linux/module.h> -#include <linux/of_platform.h> +#include <linux/of.h> #include <linux/regmap.h> #define ALT_SDR_CTL_FPGAPORTRST_OFST 0x80 @@ -75,12 +75,6 @@ static int alt_fpga2sdram_enable_set(struct fpga_bridge *bridge, bool enable) return _alt_fpga2sdram_enable_set(bridge->priv, enable); } -struct prop_map { - char *prop_name; - u32 *prop_value; - u32 prop_max; -}; - static const struct fpga_bridge_ops altera_fpga2sdram_br_ops = { .enable_set = alt_fpga2sdram_enable_set, .enable_show = alt_fpga2sdram_enable_show, @@ -121,17 +115,13 @@ static int alt_fpga_bridge_probe(struct platform_device *pdev) /* Get f2s bridge configuration saved in handoff register */ regmap_read(sysmgr, SYSMGR_ISWGRP_HANDOFF3, &priv->mask); - br = devm_fpga_bridge_create(dev, F2S_BRIDGE_NAME, - &altera_fpga2sdram_br_ops, priv); - if (!br) - return -ENOMEM; + br = fpga_bridge_register(dev, F2S_BRIDGE_NAME, + &altera_fpga2sdram_br_ops, priv); + if (IS_ERR(br)) + return PTR_ERR(br); platform_set_drvdata(pdev, br); - ret = fpga_bridge_register(br); - if (ret) - return ret; - dev_info(dev, "driver initialized with handoff %08x\n", priv->mask); if (!of_property_read_u32(dev->of_node, "bridge-enable", &enable)) { @@ -151,13 +141,11 @@ static int alt_fpga_bridge_probe(struct platform_device *pdev) return ret; } -static int alt_fpga_bridge_remove(struct platform_device *pdev) +static void alt_fpga_bridge_remove(struct platform_device *pdev) { struct fpga_bridge *br = platform_get_drvdata(pdev); fpga_bridge_unregister(br); - - return 0; } MODULE_DEVICE_TABLE(of, altera_fpga_of_match); diff --git a/drivers/fpga/altera-freeze-bridge.c b/drivers/fpga/altera-freeze-bridge.c index dd58c4aea92e..594693ff786e 100644 --- a/drivers/fpga/altera-freeze-bridge.c +++ b/drivers/fpga/altera-freeze-bridge.c @@ -7,8 +7,9 @@ #include <linux/delay.h> #include <linux/io.h> #include <linux/kernel.h> -#include <linux/of_device.h> +#include <linux/mod_devicetable.h> #include <linux/module.h> +#include <linux/platform_device.h> #include <linux/fpga/fpga-bridge.h> #define FREEZE_CSR_STATUS_OFFSET 0 @@ -211,14 +212,12 @@ static int altera_freeze_br_probe(struct platform_device *pdev) void __iomem *base_addr; struct altera_freeze_br_data *priv; struct fpga_bridge *br; - struct resource *res; u32 status, revision; if (!np) return -ENODEV; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - base_addr = 
devm_ioremap_resource(dev, res); + base_addr = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(base_addr)) return PTR_ERR(base_addr); @@ -244,23 +243,21 @@ static int altera_freeze_br_probe(struct platform_device *pdev) priv->base_addr = base_addr; - br = devm_fpga_bridge_create(dev, FREEZE_BRIDGE_NAME, - &altera_freeze_br_br_ops, priv); - if (!br) - return -ENOMEM; + br = fpga_bridge_register(dev, FREEZE_BRIDGE_NAME, + &altera_freeze_br_br_ops, priv); + if (IS_ERR(br)) + return PTR_ERR(br); platform_set_drvdata(pdev, br); - return fpga_bridge_register(br); + return 0; } -static int altera_freeze_br_remove(struct platform_device *pdev) +static void altera_freeze_br_remove(struct platform_device *pdev) { struct fpga_bridge *br = platform_get_drvdata(pdev); fpga_bridge_unregister(br); - - return 0; } static struct platform_driver altera_freeze_br_driver = { @@ -268,7 +265,7 @@ static struct platform_driver altera_freeze_br_driver = { .remove = altera_freeze_br_remove, .driver = { .name = "altera_freeze_br", - .of_match_table = of_match_ptr(altera_freeze_br_of_match), + .of_match_table = altera_freeze_br_of_match, }, }; diff --git a/drivers/fpga/altera-hps2fpga.c b/drivers/fpga/altera-hps2fpga.c index 77b95f251821..f2f1250689cb 100644 --- a/drivers/fpga/altera-hps2fpga.c +++ b/drivers/fpga/altera-hps2fpga.c @@ -24,7 +24,8 @@ #include <linux/kernel.h> #include <linux/mfd/syscon.h> #include <linux/module.h> -#include <linux/of_platform.h> +#include <linux/of.h> +#include <linux/property.h> #include <linux/regmap.h> #include <linux/reset.h> #include <linux/spinlock.h> @@ -127,18 +128,11 @@ static int alt_fpga_bridge_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct altera_hps2fpga_data *priv; - const struct of_device_id *of_id; struct fpga_bridge *br; u32 enable; int ret; - of_id = of_match_device(altera_fpga_of_match, dev); - if (!of_id) { - dev_err(dev, "failed to match device\n"); - return -ENODEV; - } - - priv = (struct altera_hps2fpga_data *)of_id->data; + priv = (struct altera_hps2fpga_data *)device_get_match_data(dev); priv->bridge_reset = of_reset_control_get_exclusive_by_index(dev->of_node, 0); @@ -180,19 +174,15 @@ static int alt_fpga_bridge_probe(struct platform_device *pdev) } } - br = devm_fpga_bridge_create(dev, priv->name, - &altera_hps2fpga_br_ops, priv); - if (!br) { - ret = -ENOMEM; + br = fpga_bridge_register(dev, priv->name, + &altera_hps2fpga_br_ops, priv); + if (IS_ERR(br)) { + ret = PTR_ERR(br); goto err; } platform_set_drvdata(pdev, br); - ret = fpga_bridge_register(br); - if (ret) - goto err; - return 0; err: @@ -201,7 +191,7 @@ err: return ret; } -static int alt_fpga_bridge_remove(struct platform_device *pdev) +static void alt_fpga_bridge_remove(struct platform_device *pdev) { struct fpga_bridge *bridge = platform_get_drvdata(pdev); struct altera_hps2fpga_data *priv = bridge->priv; @@ -209,8 +199,6 @@ static int alt_fpga_bridge_remove(struct platform_device *pdev) fpga_bridge_unregister(bridge); clk_disable_unprepare(priv->clk); - - return 0; } MODULE_DEVICE_TABLE(of, altera_fpga_of_match); diff --git a/drivers/fpga/altera-pr-ip-core-plat.c b/drivers/fpga/altera-pr-ip-core-plat.c index b008a6b8d2d3..9dc263930007 100644 --- a/drivers/fpga/altera-pr-ip-core-plat.c +++ b/drivers/fpga/altera-pr-ip-core-plat.c @@ -9,19 +9,16 @@ */ #include <linux/fpga/altera-pr-ip-core.h> #include <linux/module.h> -#include <linux/of_device.h> +#include <linux/mod_devicetable.h> +#include <linux/platform_device.h> static int alt_pr_platform_probe(struct 
platform_device *pdev) { struct device *dev = &pdev->dev; void __iomem *reg_base; - struct resource *res; /* First mmio base is for register access */ - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - - reg_base = devm_ioremap_resource(dev, res); - + reg_base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(reg_base)) return PTR_ERR(reg_base); diff --git a/drivers/fpga/altera-pr-ip-core.c b/drivers/fpga/altera-pr-ip-core.c index dfdf21ed34c4..df8671af4a92 100644 --- a/drivers/fpga/altera-pr-ip-core.c +++ b/drivers/fpga/altera-pr-ip-core.c @@ -108,7 +108,7 @@ static int alt_pr_fpga_write(struct fpga_manager *mgr, const char *buf, u32 *buffer_32 = (u32 *)buf; size_t i = 0; - if (count <= 0) + if (!count) return -EINVAL; /* Write out the complete 32-bit chunks */ @@ -191,11 +191,8 @@ int alt_pr_register(struct device *dev, void __iomem *reg_base) (val & ALT_PR_CSR_STATUS_MSK) >> ALT_PR_CSR_STATUS_SFT, (int)(val & ALT_PR_CSR_PR_START)); - mgr = devm_fpga_mgr_create(dev, dev_name(dev), &alt_pr_ops, priv); - if (!mgr) - return -ENOMEM; - - return devm_fpga_mgr_register(dev, mgr); + mgr = devm_fpga_mgr_register(dev, dev_name(dev), &alt_pr_ops, priv); + return PTR_ERR_OR_ZERO(mgr); } EXPORT_SYMBOL_GPL(alt_pr_register); diff --git a/drivers/fpga/altera-ps-spi.c b/drivers/fpga/altera-ps-spi.c index 23bfd4d1ad0f..d0ec3539b31f 100644 --- a/drivers/fpga/altera-ps-spi.c +++ b/drivers/fpga/altera-ps-spi.c @@ -18,8 +18,7 @@ #include <linux/fpga/fpga-mgr.h> #include <linux/gpio/consumer.h> #include <linux/module.h> -#include <linux/of_gpio.h> -#include <linux/of_device.h> +#include <linux/of.h> #include <linux/spi/spi.h> #include <linux/sizes.h> @@ -72,12 +71,6 @@ static struct altera_ps_data a10_data = { .t_st2ck_us = 10, /* min(t_ST2CK) */ }; -/* Array index is enum altera_ps_devtype */ -static const struct altera_ps_data *altera_ps_data_map[] = { - &c5_data, - &a10_data, -}; - static const struct of_device_id of_ef_match[] = { { .compatible = "altr,fpga-passive-serial", .data = &c5_data }, { .compatible = "altr,fpga-arria10-passive-serial", .data = &a10_data }, @@ -237,43 +230,16 @@ static const struct fpga_manager_ops altera_ps_ops = { .write_complete = altera_ps_write_complete, }; -static const struct altera_ps_data *id_to_data(const struct spi_device_id *id) -{ - kernel_ulong_t devtype = id->driver_data; - const struct altera_ps_data *data; - - /* someone added a altera_ps_devtype without adding to the map array */ - if (devtype >= ARRAY_SIZE(altera_ps_data_map)) - return NULL; - - data = altera_ps_data_map[devtype]; - if (!data || data->devtype != devtype) - return NULL; - - return data; -} - static int altera_ps_probe(struct spi_device *spi) { struct altera_ps_conf *conf; - const struct of_device_id *of_id; struct fpga_manager *mgr; conf = devm_kzalloc(&spi->dev, sizeof(*conf), GFP_KERNEL); if (!conf) return -ENOMEM; - if (spi->dev.of_node) { - of_id = of_match_device(of_ef_match, &spi->dev); - if (!of_id) - return -ENODEV; - conf->data = of_id->data; - } else { - conf->data = id_to_data(spi_get_device_id(spi)); - if (!conf->data) - return -ENODEV; - } - + conf->data = spi_get_device_match_data(spi); conf->spi = spi; conf->config = devm_gpiod_get(&spi->dev, "nconfig", GPIOD_OUT_LOW); if (IS_ERR(conf->config)) { @@ -302,18 +268,15 @@ static int altera_ps_probe(struct spi_device *spi) snprintf(conf->mgr_name, sizeof(conf->mgr_name), "%s %s", dev_driver_string(&spi->dev), dev_name(&spi->dev)); - mgr = devm_fpga_mgr_create(&spi->dev, conf->mgr_name, - &altera_ps_ops, conf); - if (!mgr) - 
return -ENOMEM; - - return devm_fpga_mgr_register(&spi->dev, mgr); + mgr = devm_fpga_mgr_register(&spi->dev, conf->mgr_name, + &altera_ps_ops, conf); + return PTR_ERR_OR_ZERO(mgr); } static const struct spi_device_id altera_ps_spi_ids[] = { - { "cyclone-ps-spi", CYCLONE5 }, - { "fpga-passive-serial", CYCLONE5 }, - { "fpga-arria10-passive-serial", ARRIA10 }, + { "cyclone-ps-spi", (uintptr_t)&c5_data }, + { "fpga-passive-serial", (uintptr_t)&c5_data }, + { "fpga-arria10-passive-serial", (uintptr_t)&a10_data }, {} }; MODULE_DEVICE_TABLE(spi, altera_ps_spi_ids); @@ -321,8 +284,7 @@ MODULE_DEVICE_TABLE(spi, altera_ps_spi_ids); static struct spi_driver altera_ps_driver = { .driver = { .name = "altera-ps-spi", - .owner = THIS_MODULE, - .of_match_table = of_match_ptr(of_ef_match), + .of_match_table = of_ef_match, }, .id_table = altera_ps_spi_ids, .probe = altera_ps_probe, diff --git a/drivers/fpga/dfl-afu-dma-region.c b/drivers/fpga/dfl-afu-dma-region.c index 02b60fde0430..5aa7b8884374 100644 --- a/drivers/fpga/dfl-afu-dma-region.c +++ b/drivers/fpga/dfl-afu-dma-region.c @@ -16,26 +16,26 @@ #include "dfl-afu.h" -void afu_dma_region_init(struct dfl_feature_platform_data *pdata) +void afu_dma_region_init(struct dfl_feature_dev_data *fdata) { - struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata); + struct dfl_afu *afu = dfl_fpga_fdata_get_private(fdata); afu->dma_regions = RB_ROOT; } /** * afu_dma_pin_pages - pin pages of given dma memory region - * @pdata: feature device platform data + * @fdata: feature dev data * @region: dma memory region to be pinned * * Pin all the pages of given dfl_afu_dma_region. * Return 0 for success or negative error code. */ -static int afu_dma_pin_pages(struct dfl_feature_platform_data *pdata, +static int afu_dma_pin_pages(struct dfl_feature_dev_data *fdata, struct dfl_afu_dma_region *region) { int npages = region->length >> PAGE_SHIFT; - struct device *dev = &pdata->dev->dev; + struct device *dev = &fdata->dev->dev; int ret, pinned; ret = account_locked_vm(current->mm, npages, true); @@ -73,17 +73,17 @@ unlock_vm: /** * afu_dma_unpin_pages - unpin pages of given dma memory region - * @pdata: feature device platform data + * @fdata: feature dev data * @region: dma memory region to be unpinned * * Unpin all the pages of given dfl_afu_dma_region. * Return 0 for success or negative error code. */ -static void afu_dma_unpin_pages(struct dfl_feature_platform_data *pdata, +static void afu_dma_unpin_pages(struct dfl_feature_dev_data *fdata, struct dfl_afu_dma_region *region) { long npages = region->length >> PAGE_SHIFT; - struct device *dev = &pdata->dev->dev; + struct device *dev = &fdata->dev->dev; unpin_user_pages(region->pages, npages); kfree(region->pages); @@ -133,20 +133,20 @@ static bool dma_region_check_iova(struct dfl_afu_dma_region *region, /** * afu_dma_region_add - add given dma region to rbtree - * @pdata: feature device platform data + * @fdata: feature dev data * @region: dma region to be added * * Return 0 for success, -EEXIST if dma region has already been added. * - * Needs to be called with pdata->lock heold. + * Needs to be called with fdata->lock held. 
*/ -static int afu_dma_region_add(struct dfl_feature_platform_data *pdata, +static int afu_dma_region_add(struct dfl_feature_dev_data *fdata, struct dfl_afu_dma_region *region) { - struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata); + struct dfl_afu *afu = dfl_fpga_fdata_get_private(fdata); struct rb_node **new, *parent = NULL; - dev_dbg(&pdata->dev->dev, "add region (iova = %llx)\n", + dev_dbg(&fdata->dev->dev, "add region (iova = %llx)\n", (unsigned long long)region->iova); new = &afu->dma_regions.rb_node; @@ -177,50 +177,50 @@ static int afu_dma_region_add(struct dfl_feature_platform_data *pdata, /** * afu_dma_region_remove - remove given dma region from rbtree - * @pdata: feature device platform data + * @fdata: feature dev data * @region: dma region to be removed * - * Needs to be called with pdata->lock heold. + * Needs to be called with fdata->lock held. */ -static void afu_dma_region_remove(struct dfl_feature_platform_data *pdata, +static void afu_dma_region_remove(struct dfl_feature_dev_data *fdata, struct dfl_afu_dma_region *region) { struct dfl_afu *afu; - dev_dbg(&pdata->dev->dev, "del region (iova = %llx)\n", + dev_dbg(&fdata->dev->dev, "del region (iova = %llx)\n", (unsigned long long)region->iova); - afu = dfl_fpga_pdata_get_private(pdata); + afu = dfl_fpga_fdata_get_private(fdata); rb_erase(®ion->node, &afu->dma_regions); } /** * afu_dma_region_destroy - destroy all regions in rbtree - * @pdata: feature device platform data + * @fdata: feature dev data * - * Needs to be called with pdata->lock heold. + * Needs to be called with fdata->lock held. */ -void afu_dma_region_destroy(struct dfl_feature_platform_data *pdata) +void afu_dma_region_destroy(struct dfl_feature_dev_data *fdata) { - struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata); + struct dfl_afu *afu = dfl_fpga_fdata_get_private(fdata); struct rb_node *node = rb_first(&afu->dma_regions); struct dfl_afu_dma_region *region; while (node) { region = container_of(node, struct dfl_afu_dma_region, node); - dev_dbg(&pdata->dev->dev, "del region (iova = %llx)\n", + dev_dbg(&fdata->dev->dev, "del region (iova = %llx)\n", (unsigned long long)region->iova); rb_erase(node, &afu->dma_regions); if (region->iova) - dma_unmap_page(dfl_fpga_pdata_to_parent(pdata), + dma_unmap_page(dfl_fpga_fdata_to_parent(fdata), region->iova, region->length, DMA_BIDIRECTIONAL); if (region->pages) - afu_dma_unpin_pages(pdata, region); + afu_dma_unpin_pages(fdata, region); node = rb_next(node); kfree(region); @@ -229,7 +229,7 @@ void afu_dma_region_destroy(struct dfl_feature_platform_data *pdata) /** * afu_dma_region_find - find the dma region from rbtree based on iova and size - * @pdata: feature device platform data + * @fdata: feature dev data * @iova: address of the dma memory area * @size: size of the dma memory area * @@ -239,14 +239,14 @@ void afu_dma_region_destroy(struct dfl_feature_platform_data *pdata) * [@iova, @iova+size) * If nothing is matched returns NULL. * - * Needs to be called with pdata->lock held. + * Needs to be called with fdata->lock held. 
*/ struct dfl_afu_dma_region * -afu_dma_region_find(struct dfl_feature_platform_data *pdata, u64 iova, u64 size) +afu_dma_region_find(struct dfl_feature_dev_data *fdata, u64 iova, u64 size) { - struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata); + struct dfl_afu *afu = dfl_fpga_fdata_get_private(fdata); struct rb_node *node = afu->dma_regions.rb_node; - struct device *dev = &pdata->dev->dev; + struct device *dev = &fdata->dev->dev; while (node) { struct dfl_afu_dma_region *region; @@ -276,20 +276,20 @@ afu_dma_region_find(struct dfl_feature_platform_data *pdata, u64 iova, u64 size) /** * afu_dma_region_find_iova - find the dma region from rbtree by iova - * @pdata: feature device platform data + * @fdata: feature dev data * @iova: address of the dma region * - * Needs to be called with pdata->lock held. + * Needs to be called with fdata->lock held. */ static struct dfl_afu_dma_region * -afu_dma_region_find_iova(struct dfl_feature_platform_data *pdata, u64 iova) +afu_dma_region_find_iova(struct dfl_feature_dev_data *fdata, u64 iova) { - return afu_dma_region_find(pdata, iova, 0); + return afu_dma_region_find(fdata, iova, 0); } /** * afu_dma_map_region - map memory region for dma - * @pdata: feature device platform data + * @fdata: feature dev data * @user_addr: address of the memory region * @length: size of the memory region * @iova: pointer of iova address @@ -298,9 +298,10 @@ afu_dma_region_find_iova(struct dfl_feature_platform_data *pdata, u64 iova) * of the memory region via @iova. * Return 0 for success, otherwise error code. */ -int afu_dma_map_region(struct dfl_feature_platform_data *pdata, +int afu_dma_map_region(struct dfl_feature_dev_data *fdata, u64 user_addr, u64 length, u64 *iova) { + struct device *dev = &fdata->dev->dev; struct dfl_afu_dma_region *region; int ret; @@ -323,47 +324,47 @@ int afu_dma_map_region(struct dfl_feature_platform_data *pdata, region->length = length; /* Pin the user memory region */ - ret = afu_dma_pin_pages(pdata, region); + ret = afu_dma_pin_pages(fdata, region); if (ret) { - dev_err(&pdata->dev->dev, "failed to pin memory region\n"); + dev_err(dev, "failed to pin memory region\n"); goto free_region; } /* Only accept continuous pages, return error else */ if (!afu_dma_check_continuous_pages(region)) { - dev_err(&pdata->dev->dev, "pages are not continuous\n"); + dev_err(dev, "pages are not continuous\n"); ret = -EINVAL; goto unpin_pages; } /* As pages are continuous then start to do DMA mapping */ - region->iova = dma_map_page(dfl_fpga_pdata_to_parent(pdata), + region->iova = dma_map_page(dfl_fpga_fdata_to_parent(fdata), region->pages[0], 0, region->length, DMA_BIDIRECTIONAL); - if (dma_mapping_error(dfl_fpga_pdata_to_parent(pdata), region->iova)) { - dev_err(&pdata->dev->dev, "failed to map for dma\n"); + if (dma_mapping_error(dfl_fpga_fdata_to_parent(fdata), region->iova)) { + dev_err(dev, "failed to map for dma\n"); ret = -EFAULT; goto unpin_pages; } *iova = region->iova; - mutex_lock(&pdata->lock); - ret = afu_dma_region_add(pdata, region); - mutex_unlock(&pdata->lock); + mutex_lock(&fdata->lock); + ret = afu_dma_region_add(fdata, region); + mutex_unlock(&fdata->lock); if (ret) { - dev_err(&pdata->dev->dev, "failed to add dma region\n"); + dev_err(dev, "failed to add dma region\n"); goto unmap_dma; } return 0; unmap_dma: - dma_unmap_page(dfl_fpga_pdata_to_parent(pdata), + dma_unmap_page(dfl_fpga_fdata_to_parent(fdata), region->iova, region->length, DMA_BIDIRECTIONAL); unpin_pages: - afu_dma_unpin_pages(pdata, region); + 
afu_dma_unpin_pages(fdata, region); free_region: kfree(region); return ret; @@ -371,34 +372,34 @@ free_region: /** * afu_dma_unmap_region - unmap dma memory region - * @pdata: feature device platform data + * @fdata: feature dev data * @iova: dma address of the region * * Unmap dma memory region based on @iova. * Return 0 for success, otherwise error code. */ -int afu_dma_unmap_region(struct dfl_feature_platform_data *pdata, u64 iova) +int afu_dma_unmap_region(struct dfl_feature_dev_data *fdata, u64 iova) { struct dfl_afu_dma_region *region; - mutex_lock(&pdata->lock); - region = afu_dma_region_find_iova(pdata, iova); + mutex_lock(&fdata->lock); + region = afu_dma_region_find_iova(fdata, iova); if (!region) { - mutex_unlock(&pdata->lock); + mutex_unlock(&fdata->lock); return -EINVAL; } if (region->in_use) { - mutex_unlock(&pdata->lock); + mutex_unlock(&fdata->lock); return -EBUSY; } - afu_dma_region_remove(pdata, region); - mutex_unlock(&pdata->lock); + afu_dma_region_remove(fdata, region); + mutex_unlock(&fdata->lock); - dma_unmap_page(dfl_fpga_pdata_to_parent(pdata), + dma_unmap_page(dfl_fpga_fdata_to_parent(fdata), region->iova, region->length, DMA_BIDIRECTIONAL); - afu_dma_unpin_pages(pdata, region); + afu_dma_unpin_pages(fdata, region); kfree(region); return 0; diff --git a/drivers/fpga/dfl-afu-error.c b/drivers/fpga/dfl-afu-error.c index ab7be6217368..0f392d1f6d45 100644 --- a/drivers/fpga/dfl-afu-error.c +++ b/drivers/fpga/dfl-afu-error.c @@ -28,37 +28,36 @@ #define ERROR_MASK GENMASK_ULL(63, 0) /* mask or unmask port errors by the error mask register. */ -static void __afu_port_err_mask(struct device *dev, bool mask) +static void __afu_port_err_mask(struct dfl_feature_dev_data *fdata, bool mask) { void __iomem *base; - base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR); + base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_ERROR); writeq(mask ? ERROR_MASK : 0, base + PORT_ERROR_MASK); } static void afu_port_err_mask(struct device *dev, bool mask) { - struct dfl_feature_platform_data *pdata = dev_get_platdata(dev); + struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev); - mutex_lock(&pdata->lock); - __afu_port_err_mask(dev, mask); - mutex_unlock(&pdata->lock); + mutex_lock(&fdata->lock); + __afu_port_err_mask(fdata, mask); + mutex_unlock(&fdata->lock); } /* clear port errors. 
*/ static int afu_port_err_clear(struct device *dev, u64 err) { - struct dfl_feature_platform_data *pdata = dev_get_platdata(dev); - struct platform_device *pdev = to_platform_device(dev); + struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev); void __iomem *base_err, *base_hdr; int enable_ret = 0, ret = -EBUSY; u64 v; - base_err = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR); - base_hdr = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER); + base_err = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_ERROR); + base_hdr = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER); - mutex_lock(&pdata->lock); + mutex_lock(&fdata->lock); /* * clear Port Errors @@ -80,12 +79,12 @@ static int afu_port_err_clear(struct device *dev, u64 err) } /* Halt Port by keeping Port in reset */ - ret = __afu_port_disable(pdev); + ret = __afu_port_disable(fdata); if (ret) goto done; /* Mask all errors */ - __afu_port_err_mask(dev, true); + __afu_port_err_mask(fdata, true); /* Clear errors if err input matches with current port errors.*/ v = readq(base_err + PORT_ERROR); @@ -102,28 +101,28 @@ static int afu_port_err_clear(struct device *dev, u64 err) } /* Clear mask */ - __afu_port_err_mask(dev, false); + __afu_port_err_mask(fdata, false); /* Enable the Port by clearing the reset */ - enable_ret = __afu_port_enable(pdev); + enable_ret = __afu_port_enable(fdata); done: - mutex_unlock(&pdata->lock); + mutex_unlock(&fdata->lock); return enable_ret ? enable_ret : ret; } static ssize_t errors_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct dfl_feature_platform_data *pdata = dev_get_platdata(dev); + struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev); void __iomem *base; u64 error; - base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR); + base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_ERROR); - mutex_lock(&pdata->lock); + mutex_lock(&fdata->lock); error = readq(base + PORT_ERROR); - mutex_unlock(&pdata->lock); + mutex_unlock(&fdata->lock); return sprintf(buf, "0x%llx\n", (unsigned long long)error); } @@ -146,15 +145,15 @@ static DEVICE_ATTR_RW(errors); static ssize_t first_error_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct dfl_feature_platform_data *pdata = dev_get_platdata(dev); + struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev); void __iomem *base; u64 error; - base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR); + base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_ERROR); - mutex_lock(&pdata->lock); + mutex_lock(&fdata->lock); error = readq(base + PORT_FIRST_ERROR); - mutex_unlock(&pdata->lock); + mutex_unlock(&fdata->lock); return sprintf(buf, "0x%llx\n", (unsigned long long)error); } @@ -164,16 +163,16 @@ static ssize_t first_malformed_req_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct dfl_feature_platform_data *pdata = dev_get_platdata(dev); + struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev); void __iomem *base; u64 req0, req1; - base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR); + base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_ERROR); - mutex_lock(&pdata->lock); + mutex_lock(&fdata->lock); req0 = readq(base + PORT_MALFORMED_REQ0); req1 = readq(base + PORT_MALFORMED_REQ1); - mutex_unlock(&pdata->lock); + mutex_unlock(&fdata->lock); return sprintf(buf, "0x%016llx%016llx\n", (unsigned long long)req1, (unsigned long long)req0); @@ -191,12 +190,14 @@ static umode_t 
port_err_attrs_visible(struct kobject *kobj, struct attribute *attr, int n) { struct device *dev = kobj_to_dev(kobj); + struct dfl_feature_dev_data *fdata; + fdata = to_dfl_feature_dev_data(dev); /* * sysfs entries are visible only if related private feature is * enumerated. */ - if (!dfl_get_feature_by_id(dev, PORT_FEATURE_ID_ERROR)) + if (!dfl_get_feature_by_id(fdata, PORT_FEATURE_ID_ERROR)) return 0; return attr->mode; diff --git a/drivers/fpga/dfl-afu-main.c b/drivers/fpga/dfl-afu-main.c index 7f621e96d3b8..3bf8e7338dbe 100644 --- a/drivers/fpga/dfl-afu-main.c +++ b/drivers/fpga/dfl-afu-main.c @@ -26,7 +26,7 @@ /** * __afu_port_enable - enable a port by clear reset - * @pdev: port platform device. + * @fdata: port feature dev data. * * Enable Port by clear the port soft reset bit, which is set by default. * The AFU is unable to respond to any MMIO access while in reset. @@ -35,18 +35,17 @@ * * The caller needs to hold lock for protection. */ -int __afu_port_enable(struct platform_device *pdev) +int __afu_port_enable(struct dfl_feature_dev_data *fdata) { - struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev); void __iomem *base; u64 v; - WARN_ON(!pdata->disable_count); + WARN_ON(!fdata->disable_count); - if (--pdata->disable_count != 0) + if (--fdata->disable_count != 0) return 0; - base = dfl_get_feature_ioaddr_by_id(&pdev->dev, PORT_FEATURE_ID_HEADER); + base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER); /* Clear port soft reset */ v = readq(base + PORT_HDR_CTRL); @@ -60,7 +59,8 @@ int __afu_port_enable(struct platform_device *pdev) if (readq_poll_timeout(base + PORT_HDR_CTRL, v, !(v & PORT_CTRL_SFTRST_ACK), RST_POLL_INVL, RST_POLL_TIMEOUT)) { - dev_err(&pdev->dev, "timeout, failure to enable device\n"); + dev_err(fdata->dfl_cdev->parent, + "timeout, failure to enable device\n"); return -ETIMEDOUT; } @@ -69,22 +69,21 @@ int __afu_port_enable(struct platform_device *pdev) /** * __afu_port_disable - disable a port by hold reset - * @pdev: port platform device. + * @fdata: port feature dev data. * * Disable Port by setting the port soft reset bit, it puts the port into reset. * * The caller needs to hold lock for protection. */ -int __afu_port_disable(struct platform_device *pdev) +int __afu_port_disable(struct dfl_feature_dev_data *fdata) { - struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev); void __iomem *base; u64 v; - if (pdata->disable_count++ != 0) + if (fdata->disable_count++ != 0) return 0; - base = dfl_get_feature_ioaddr_by_id(&pdev->dev, PORT_FEATURE_ID_HEADER); + base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER); /* Set port soft reset */ v = readq(base + PORT_HDR_CTRL); @@ -99,7 +98,8 @@ int __afu_port_disable(struct platform_device *pdev) if (readq_poll_timeout(base + PORT_HDR_CTRL, v, v & PORT_CTRL_SFTRST_ACK, RST_POLL_INVL, RST_POLL_TIMEOUT)) { - dev_err(&pdev->dev, "timeout, failure to disable device\n"); + dev_err(fdata->dfl_cdev->parent, + "timeout, failure to disable device\n"); return -ETIMEDOUT; } @@ -118,34 +118,34 @@ int __afu_port_disable(struct platform_device *pdev) * (disabled). Any attempts on MMIO access to AFU while in reset, will * result errors reported via port error reporting sub feature (if present). 
*/ -static int __port_reset(struct platform_device *pdev) +static int __port_reset(struct dfl_feature_dev_data *fdata) { int ret; - ret = __afu_port_disable(pdev); + ret = __afu_port_disable(fdata); if (ret) return ret; - return __afu_port_enable(pdev); + return __afu_port_enable(fdata); } static int port_reset(struct platform_device *pdev) { - struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev); + struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev); int ret; - mutex_lock(&pdata->lock); - ret = __port_reset(pdev); - mutex_unlock(&pdata->lock); + mutex_lock(&fdata->lock); + ret = __port_reset(fdata); + mutex_unlock(&fdata->lock); return ret; } -static int port_get_id(struct platform_device *pdev) +static int port_get_id(struct dfl_feature_dev_data *fdata) { void __iomem *base; - base = dfl_get_feature_ioaddr_by_id(&pdev->dev, PORT_FEATURE_ID_HEADER); + base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER); return FIELD_GET(PORT_CAP_PORT_NUM, readq(base + PORT_HDR_CAP)); } @@ -153,7 +153,8 @@ static int port_get_id(struct platform_device *pdev) static ssize_t id_show(struct device *dev, struct device_attribute *attr, char *buf) { - int id = port_get_id(to_platform_device(dev)); + struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev); + int id = port_get_id(fdata); return scnprintf(buf, PAGE_SIZE, "%d\n", id); } @@ -162,15 +163,15 @@ static DEVICE_ATTR_RO(id); static ssize_t ltr_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct dfl_feature_platform_data *pdata = dev_get_platdata(dev); + struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev); void __iomem *base; u64 v; - base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER); + base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER); - mutex_lock(&pdata->lock); + mutex_lock(&fdata->lock); v = readq(base + PORT_HDR_CTRL); - mutex_unlock(&pdata->lock); + mutex_unlock(&fdata->lock); return sprintf(buf, "%x\n", (u8)FIELD_GET(PORT_CTRL_LATENCY, v)); } @@ -179,7 +180,7 @@ static ssize_t ltr_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - struct dfl_feature_platform_data *pdata = dev_get_platdata(dev); + struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev); void __iomem *base; bool ltr; u64 v; @@ -187,14 +188,14 @@ ltr_store(struct device *dev, struct device_attribute *attr, if (kstrtobool(buf, <r)) return -EINVAL; - base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER); + base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER); - mutex_lock(&pdata->lock); + mutex_lock(&fdata->lock); v = readq(base + PORT_HDR_CTRL); v &= ~PORT_CTRL_LATENCY; v |= FIELD_PREP(PORT_CTRL_LATENCY, ltr ? 
1 : 0); writeq(v, base + PORT_HDR_CTRL); - mutex_unlock(&pdata->lock); + mutex_unlock(&fdata->lock); return count; } @@ -203,15 +204,15 @@ static DEVICE_ATTR_RW(ltr); static ssize_t ap1_event_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct dfl_feature_platform_data *pdata = dev_get_platdata(dev); + struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev); void __iomem *base; u64 v; - base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER); + base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER); - mutex_lock(&pdata->lock); + mutex_lock(&fdata->lock); v = readq(base + PORT_HDR_STS); - mutex_unlock(&pdata->lock); + mutex_unlock(&fdata->lock); return sprintf(buf, "%x\n", (u8)FIELD_GET(PORT_STS_AP1_EVT, v)); } @@ -220,18 +221,18 @@ static ssize_t ap1_event_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - struct dfl_feature_platform_data *pdata = dev_get_platdata(dev); + struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev); void __iomem *base; bool clear; if (kstrtobool(buf, &clear) || !clear) return -EINVAL; - base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER); + base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER); - mutex_lock(&pdata->lock); + mutex_lock(&fdata->lock); writeq(PORT_STS_AP1_EVT, base + PORT_HDR_STS); - mutex_unlock(&pdata->lock); + mutex_unlock(&fdata->lock); return count; } @@ -241,15 +242,15 @@ static ssize_t ap2_event_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct dfl_feature_platform_data *pdata = dev_get_platdata(dev); + struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev); void __iomem *base; u64 v; - base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER); + base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER); - mutex_lock(&pdata->lock); + mutex_lock(&fdata->lock); v = readq(base + PORT_HDR_STS); - mutex_unlock(&pdata->lock); + mutex_unlock(&fdata->lock); return sprintf(buf, "%x\n", (u8)FIELD_GET(PORT_STS_AP2_EVT, v)); } @@ -258,18 +259,18 @@ static ssize_t ap2_event_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - struct dfl_feature_platform_data *pdata = dev_get_platdata(dev); + struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev); void __iomem *base; bool clear; if (kstrtobool(buf, &clear) || !clear) return -EINVAL; - base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER); + base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER); - mutex_lock(&pdata->lock); + mutex_lock(&fdata->lock); writeq(PORT_STS_AP2_EVT, base + PORT_HDR_STS); - mutex_unlock(&pdata->lock); + mutex_unlock(&fdata->lock); return count; } @@ -278,15 +279,15 @@ static DEVICE_ATTR_RW(ap2_event); static ssize_t power_state_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct dfl_feature_platform_data *pdata = dev_get_platdata(dev); + struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev); void __iomem *base; u64 v; - base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER); + base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER); - mutex_lock(&pdata->lock); + mutex_lock(&fdata->lock); v = readq(base + PORT_HDR_STS); - mutex_unlock(&pdata->lock); + mutex_unlock(&fdata->lock); return sprintf(buf, "0x%x\n", (u8)FIELD_GET(PORT_STS_PWR_STATE, v)); } @@ -296,18 +297,18 @@ static ssize_t userclk_freqcmd_store(struct device *dev, struct device_attribute *attr, const 
char *buf, size_t count) { - struct dfl_feature_platform_data *pdata = dev_get_platdata(dev); + struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev); u64 userclk_freq_cmd; void __iomem *base; if (kstrtou64(buf, 0, &userclk_freq_cmd)) return -EINVAL; - base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER); + base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER); - mutex_lock(&pdata->lock); + mutex_lock(&fdata->lock); writeq(userclk_freq_cmd, base + PORT_HDR_USRCLK_CMD0); - mutex_unlock(&pdata->lock); + mutex_unlock(&fdata->lock); return count; } @@ -317,18 +318,18 @@ static ssize_t userclk_freqcntrcmd_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - struct dfl_feature_platform_data *pdata = dev_get_platdata(dev); + struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev); u64 userclk_freqcntr_cmd; void __iomem *base; if (kstrtou64(buf, 0, &userclk_freqcntr_cmd)) return -EINVAL; - base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER); + base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER); - mutex_lock(&pdata->lock); + mutex_lock(&fdata->lock); writeq(userclk_freqcntr_cmd, base + PORT_HDR_USRCLK_CMD1); - mutex_unlock(&pdata->lock); + mutex_unlock(&fdata->lock); return count; } @@ -338,15 +339,15 @@ static ssize_t userclk_freqsts_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct dfl_feature_platform_data *pdata = dev_get_platdata(dev); + struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev); u64 userclk_freqsts; void __iomem *base; - base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER); + base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER); - mutex_lock(&pdata->lock); + mutex_lock(&fdata->lock); userclk_freqsts = readq(base + PORT_HDR_USRCLK_STS0); - mutex_unlock(&pdata->lock); + mutex_unlock(&fdata->lock); return sprintf(buf, "0x%llx\n", (unsigned long long)userclk_freqsts); } @@ -356,15 +357,15 @@ static ssize_t userclk_freqcntrsts_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct dfl_feature_platform_data *pdata = dev_get_platdata(dev); + struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev); u64 userclk_freqcntrsts; void __iomem *base; - base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER); + base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER); - mutex_lock(&pdata->lock); + mutex_lock(&fdata->lock); userclk_freqcntrsts = readq(base + PORT_HDR_USRCLK_STS1); - mutex_unlock(&pdata->lock); + mutex_unlock(&fdata->lock); return sprintf(buf, "0x%llx\n", (unsigned long long)userclk_freqcntrsts); @@ -388,10 +389,12 @@ static umode_t port_hdr_attrs_visible(struct kobject *kobj, struct attribute *attr, int n) { struct device *dev = kobj_to_dev(kobj); + struct dfl_feature_dev_data *fdata; umode_t mode = attr->mode; void __iomem *base; - base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER); + fdata = to_dfl_feature_dev_data(dev); + base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER); if (dfl_feature_revision(base) > 0) { /* @@ -456,21 +459,21 @@ static const struct dfl_feature_ops port_hdr_ops = { static ssize_t afu_id_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct dfl_feature_platform_data *pdata = dev_get_platdata(dev); + struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev); void __iomem *base; u64 guidl, guidh; - base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_AFU); + base = 
dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_AFU); - mutex_lock(&pdata->lock); - if (pdata->disable_count) { - mutex_unlock(&pdata->lock); + mutex_lock(&fdata->lock); + if (fdata->disable_count) { + mutex_unlock(&fdata->lock); return -EBUSY; } guidl = readq(base + GUID_L); guidh = readq(base + GUID_H); - mutex_unlock(&pdata->lock); + mutex_unlock(&fdata->lock); return scnprintf(buf, PAGE_SIZE, "%016llx%016llx\n", guidh, guidl); } @@ -485,12 +488,14 @@ static umode_t port_afu_attrs_visible(struct kobject *kobj, struct attribute *attr, int n) { struct device *dev = kobj_to_dev(kobj); + struct dfl_feature_dev_data *fdata; + fdata = to_dfl_feature_dev_data(dev); /* * sysfs entries are visible only if related private feature is * enumerated. */ - if (!dfl_get_feature_by_id(dev, PORT_FEATURE_ID_AFU)) + if (!dfl_get_feature_by_id(fdata, PORT_FEATURE_ID_AFU)) return 0; return attr->mode; @@ -504,9 +509,10 @@ static const struct attribute_group port_afu_group = { static int port_afu_init(struct platform_device *pdev, struct dfl_feature *feature) { + struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev); struct resource *res = &pdev->resource[feature->resource_index]; - return afu_mmio_region_add(dev_get_platdata(&pdev->dev), + return afu_mmio_region_add(fdata, DFL_PORT_REGION_INDEX_AFU, resource_size(res), res->start, DFL_PORT_REGION_MMAP | DFL_PORT_REGION_READ | @@ -525,9 +531,10 @@ static const struct dfl_feature_ops port_afu_ops = { static int port_stp_init(struct platform_device *pdev, struct dfl_feature *feature) { + struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev); struct resource *res = &pdev->resource[feature->resource_index]; - return afu_mmio_region_add(dev_get_platdata(&pdev->dev), + return afu_mmio_region_add(fdata, DFL_PORT_REGION_INDEX_STP, resource_size(res), res->start, DFL_PORT_REGION_MMAP | DFL_PORT_REGION_READ | @@ -595,22 +602,18 @@ static struct dfl_feature_driver port_feature_drvs[] = { static int afu_open(struct inode *inode, struct file *filp) { - struct platform_device *fdev = dfl_fpga_inode_to_feature_dev(inode); - struct dfl_feature_platform_data *pdata; + struct dfl_feature_dev_data *fdata = dfl_fpga_inode_to_feature_dev_data(inode); + struct platform_device *fdev = fdata->dev; int ret; - pdata = dev_get_platdata(&fdev->dev); - if (WARN_ON(!pdata)) - return -ENODEV; - - mutex_lock(&pdata->lock); - ret = dfl_feature_dev_use_begin(pdata, filp->f_flags & O_EXCL); + mutex_lock(&fdata->lock); + ret = dfl_feature_dev_use_begin(fdata, filp->f_flags & O_EXCL); if (!ret) { dev_dbg(&fdev->dev, "Device File Opened %d Times\n", - dfl_feature_dev_use_count(pdata)); + dfl_feature_dev_use_count(fdata)); filp->private_data = fdev; } - mutex_unlock(&pdata->lock); + mutex_unlock(&fdata->lock); return ret; } @@ -618,29 +621,29 @@ static int afu_open(struct inode *inode, struct file *filp) static int afu_release(struct inode *inode, struct file *filp) { struct platform_device *pdev = filp->private_data; - struct dfl_feature_platform_data *pdata; + struct dfl_feature_dev_data *fdata; struct dfl_feature *feature; dev_dbg(&pdev->dev, "Device File Release\n"); - pdata = dev_get_platdata(&pdev->dev); + fdata = to_dfl_feature_dev_data(&pdev->dev); - mutex_lock(&pdata->lock); - dfl_feature_dev_use_end(pdata); + mutex_lock(&fdata->lock); + dfl_feature_dev_use_end(fdata); - if (!dfl_feature_dev_use_count(pdata)) { - dfl_fpga_dev_for_each_feature(pdata, feature) + if (!dfl_feature_dev_use_count(fdata)) { + dfl_fpga_dev_for_each_feature(fdata, feature) 
dfl_fpga_set_irq_triggers(feature, 0, feature->nr_irqs, NULL); - __port_reset(pdev); - afu_dma_region_destroy(pdata); + __port_reset(fdata); + afu_dma_region_destroy(fdata); } - mutex_unlock(&pdata->lock); + mutex_unlock(&fdata->lock); return 0; } -static long afu_ioctl_check_extension(struct dfl_feature_platform_data *pdata, +static long afu_ioctl_check_extension(struct dfl_feature_dev_data *fdata, unsigned long arg) { /* No extension support for now */ @@ -648,7 +651,7 @@ static long afu_ioctl_check_extension(struct dfl_feature_platform_data *pdata, } static long -afu_ioctl_get_info(struct dfl_feature_platform_data *pdata, void __user *arg) +afu_ioctl_get_info(struct dfl_feature_dev_data *fdata, void __user *arg) { struct dfl_fpga_port_info info; struct dfl_afu *afu; @@ -662,12 +665,12 @@ afu_ioctl_get_info(struct dfl_feature_platform_data *pdata, void __user *arg) if (info.argsz < minsz) return -EINVAL; - mutex_lock(&pdata->lock); - afu = dfl_fpga_pdata_get_private(pdata); + mutex_lock(&fdata->lock); + afu = dfl_fpga_fdata_get_private(fdata); info.flags = 0; info.num_regions = afu->num_regions; info.num_umsgs = afu->num_umsgs; - mutex_unlock(&pdata->lock); + mutex_unlock(&fdata->lock); if (copy_to_user(arg, &info, sizeof(info))) return -EFAULT; @@ -675,7 +678,7 @@ afu_ioctl_get_info(struct dfl_feature_platform_data *pdata, void __user *arg) return 0; } -static long afu_ioctl_get_region_info(struct dfl_feature_platform_data *pdata, +static long afu_ioctl_get_region_info(struct dfl_feature_dev_data *fdata, void __user *arg) { struct dfl_fpga_port_region_info rinfo; @@ -691,7 +694,7 @@ static long afu_ioctl_get_region_info(struct dfl_feature_platform_data *pdata, if (rinfo.argsz < minsz || rinfo.padding) return -EINVAL; - ret = afu_mmio_region_get_by_index(pdata, rinfo.index, ®ion); + ret = afu_mmio_region_get_by_index(fdata, rinfo.index, ®ion); if (ret) return ret; @@ -706,7 +709,7 @@ static long afu_ioctl_get_region_info(struct dfl_feature_platform_data *pdata, } static long -afu_ioctl_dma_map(struct dfl_feature_platform_data *pdata, void __user *arg) +afu_ioctl_dma_map(struct dfl_feature_dev_data *fdata, void __user *arg) { struct dfl_fpga_port_dma_map map; unsigned long minsz; @@ -720,16 +723,16 @@ afu_ioctl_dma_map(struct dfl_feature_platform_data *pdata, void __user *arg) if (map.argsz < minsz || map.flags) return -EINVAL; - ret = afu_dma_map_region(pdata, map.user_addr, map.length, &map.iova); + ret = afu_dma_map_region(fdata, map.user_addr, map.length, &map.iova); if (ret) return ret; if (copy_to_user(arg, &map, sizeof(map))) { - afu_dma_unmap_region(pdata, map.iova); + afu_dma_unmap_region(fdata, map.iova); return -EFAULT; } - dev_dbg(&pdata->dev->dev, "dma map: ua=%llx, len=%llx, iova=%llx\n", + dev_dbg(&fdata->dev->dev, "dma map: ua=%llx, len=%llx, iova=%llx\n", (unsigned long long)map.user_addr, (unsigned long long)map.length, (unsigned long long)map.iova); @@ -738,7 +741,7 @@ afu_ioctl_dma_map(struct dfl_feature_platform_data *pdata, void __user *arg) } static long -afu_ioctl_dma_unmap(struct dfl_feature_platform_data *pdata, void __user *arg) +afu_ioctl_dma_unmap(struct dfl_feature_dev_data *fdata, void __user *arg) { struct dfl_fpga_port_dma_unmap unmap; unsigned long minsz; @@ -751,33 +754,33 @@ afu_ioctl_dma_unmap(struct dfl_feature_platform_data *pdata, void __user *arg) if (unmap.argsz < minsz || unmap.flags) return -EINVAL; - return afu_dma_unmap_region(pdata, unmap.iova); + return afu_dma_unmap_region(fdata, unmap.iova); } static long afu_ioctl(struct file *filp, 
unsigned int cmd, unsigned long arg) { struct platform_device *pdev = filp->private_data; - struct dfl_feature_platform_data *pdata; + struct dfl_feature_dev_data *fdata; struct dfl_feature *f; long ret; dev_dbg(&pdev->dev, "%s cmd 0x%x\n", __func__, cmd); - pdata = dev_get_platdata(&pdev->dev); + fdata = to_dfl_feature_dev_data(&pdev->dev); switch (cmd) { case DFL_FPGA_GET_API_VERSION: return DFL_FPGA_API_VERSION; case DFL_FPGA_CHECK_EXTENSION: - return afu_ioctl_check_extension(pdata, arg); + return afu_ioctl_check_extension(fdata, arg); case DFL_FPGA_PORT_GET_INFO: - return afu_ioctl_get_info(pdata, (void __user *)arg); + return afu_ioctl_get_info(fdata, (void __user *)arg); case DFL_FPGA_PORT_GET_REGION_INFO: - return afu_ioctl_get_region_info(pdata, (void __user *)arg); + return afu_ioctl_get_region_info(fdata, (void __user *)arg); case DFL_FPGA_PORT_DMA_MAP: - return afu_ioctl_dma_map(pdata, (void __user *)arg); + return afu_ioctl_dma_map(fdata, (void __user *)arg); case DFL_FPGA_PORT_DMA_UNMAP: - return afu_ioctl_dma_unmap(pdata, (void __user *)arg); + return afu_ioctl_dma_unmap(fdata, (void __user *)arg); default: /* * Let sub-feature's ioctl function to handle the cmd @@ -785,7 +788,7 @@ static long afu_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) * handled in this sub feature, and returns 0 and other * error code if cmd is handled. */ - dfl_fpga_dev_for_each_feature(pdata, f) + dfl_fpga_dev_for_each_feature(fdata, f) if (f->ops && f->ops->ioctl) { ret = f->ops->ioctl(pdev, f, cmd, arg); if (ret != -ENODEV) @@ -805,8 +808,8 @@ static const struct vm_operations_struct afu_vma_ops = { static int afu_mmap(struct file *filp, struct vm_area_struct *vma) { struct platform_device *pdev = filp->private_data; - struct dfl_feature_platform_data *pdata; u64 size = vma->vm_end - vma->vm_start; + struct dfl_feature_dev_data *fdata; struct dfl_afu_mmio_region region; u64 offset; int ret; @@ -814,10 +817,10 @@ static int afu_mmap(struct file *filp, struct vm_area_struct *vma) if (!(vma->vm_flags & VM_SHARED)) return -EINVAL; - pdata = dev_get_platdata(&pdev->dev); + fdata = to_dfl_feature_dev_data(&pdev->dev); offset = vma->vm_pgoff << PAGE_SHIFT; - ret = afu_mmio_region_get_by_offset(pdata, offset, size, ®ion); + ret = afu_mmio_region_get_by_offset(fdata, offset, size, ®ion); if (ret) return ret; @@ -851,48 +854,45 @@ static const struct file_operations afu_fops = { static int afu_dev_init(struct platform_device *pdev) { - struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev); + struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev); struct dfl_afu *afu; afu = devm_kzalloc(&pdev->dev, sizeof(*afu), GFP_KERNEL); if (!afu) return -ENOMEM; - afu->pdata = pdata; - - mutex_lock(&pdata->lock); - dfl_fpga_pdata_set_private(pdata, afu); - afu_mmio_region_init(pdata); - afu_dma_region_init(pdata); - mutex_unlock(&pdata->lock); + mutex_lock(&fdata->lock); + dfl_fpga_fdata_set_private(fdata, afu); + afu_mmio_region_init(fdata); + afu_dma_region_init(fdata); + mutex_unlock(&fdata->lock); return 0; } static int afu_dev_destroy(struct platform_device *pdev) { - struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev); + struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev); - mutex_lock(&pdata->lock); - afu_mmio_region_destroy(pdata); - afu_dma_region_destroy(pdata); - dfl_fpga_pdata_set_private(pdata, NULL); - mutex_unlock(&pdata->lock); + mutex_lock(&fdata->lock); + afu_mmio_region_destroy(fdata); + 
afu_dma_region_destroy(fdata); + dfl_fpga_fdata_set_private(fdata, NULL); + mutex_unlock(&fdata->lock); return 0; } -static int port_enable_set(struct platform_device *pdev, bool enable) +static int port_enable_set(struct dfl_feature_dev_data *fdata, bool enable) { - struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev); int ret; - mutex_lock(&pdata->lock); + mutex_lock(&fdata->lock); if (enable) - ret = __afu_port_enable(pdev); + ret = __afu_port_enable(fdata); else - ret = __afu_port_disable(pdev); - mutex_unlock(&pdata->lock); + ret = __afu_port_disable(fdata); + mutex_unlock(&fdata->lock); return ret; } @@ -932,15 +932,13 @@ exit: return ret; } -static int afu_remove(struct platform_device *pdev) +static void afu_remove(struct platform_device *pdev) { dev_dbg(&pdev->dev, "%s\n", __func__); dfl_fpga_dev_ops_unregister(pdev); dfl_fpga_dev_feature_uinit(pdev); afu_dev_destroy(pdev); - - return 0; } static const struct attribute_group *afu_dev_groups[] = { @@ -951,12 +949,12 @@ static const struct attribute_group *afu_dev_groups[] = { }; static struct platform_driver afu_driver = { - .driver = { - .name = DFL_FPGA_FEATURE_DEV_PORT, + .driver = { + .name = DFL_FPGA_FEATURE_DEV_PORT, .dev_groups = afu_dev_groups, }, - .probe = afu_probe, - .remove = afu_remove, + .probe = afu_probe, + .remove = afu_remove, }; static int __init afu_init(void) diff --git a/drivers/fpga/dfl-afu-region.c b/drivers/fpga/dfl-afu-region.c index 0804b7a0c298..b11a5b21e666 100644 --- a/drivers/fpga/dfl-afu-region.c +++ b/drivers/fpga/dfl-afu-region.c @@ -12,11 +12,11 @@ /** * afu_mmio_region_init - init function for afu mmio region support - * @pdata: afu platform device's pdata. + * @fdata: afu feature dev data */ -void afu_mmio_region_init(struct dfl_feature_platform_data *pdata) +void afu_mmio_region_init(struct dfl_feature_dev_data *fdata) { - struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata); + struct dfl_afu *afu = dfl_fpga_fdata_get_private(fdata); INIT_LIST_HEAD(&afu->regions); } @@ -39,6 +39,7 @@ static struct dfl_afu_mmio_region *get_region_by_index(struct dfl_afu *afu, /** * afu_mmio_region_add - add a mmio region to given feature dev. * + * @fdata: afu feature dev data * @region_index: region index. * @region_size: region size. * @phys: region's physical address of this region. @@ -46,14 +47,15 @@ static struct dfl_afu_mmio_region *get_region_by_index(struct dfl_afu *afu, * * Return: 0 on success, negative error code otherwise. 
*/ -int afu_mmio_region_add(struct dfl_feature_platform_data *pdata, +int afu_mmio_region_add(struct dfl_feature_dev_data *fdata, u32 region_index, u64 region_size, u64 phys, u32 flags) { + struct device *dev = &fdata->dev->dev; struct dfl_afu_mmio_region *region; struct dfl_afu *afu; int ret = 0; - region = devm_kzalloc(&pdata->dev->dev, sizeof(*region), GFP_KERNEL); + region = devm_kzalloc(dev, sizeof(*region), GFP_KERNEL); if (!region) return -ENOMEM; @@ -62,13 +64,13 @@ int afu_mmio_region_add(struct dfl_feature_platform_data *pdata, region->phys = phys; region->flags = flags; - mutex_lock(&pdata->lock); + mutex_lock(&fdata->lock); - afu = dfl_fpga_pdata_get_private(pdata); + afu = dfl_fpga_fdata_get_private(fdata); /* check if @index already exists */ if (get_region_by_index(afu, region_index)) { - mutex_unlock(&pdata->lock); + mutex_unlock(&fdata->lock); ret = -EEXIST; goto exit; } @@ -79,37 +81,37 @@ int afu_mmio_region_add(struct dfl_feature_platform_data *pdata, afu->region_cur_offset += region_size; afu->num_regions++; - mutex_unlock(&pdata->lock); + mutex_unlock(&fdata->lock); return 0; exit: - devm_kfree(&pdata->dev->dev, region); + devm_kfree(dev, region); return ret; } /** * afu_mmio_region_destroy - destroy all mmio regions under given feature dev. - * @pdata: afu platform device's pdata. + * @fdata: afu feature dev data */ -void afu_mmio_region_destroy(struct dfl_feature_platform_data *pdata) +void afu_mmio_region_destroy(struct dfl_feature_dev_data *fdata) { - struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata); + struct dfl_afu *afu = dfl_fpga_fdata_get_private(fdata); struct dfl_afu_mmio_region *tmp, *region; list_for_each_entry_safe(region, tmp, &afu->regions, node) - devm_kfree(&pdata->dev->dev, region); + devm_kfree(&fdata->dev->dev, region); } /** * afu_mmio_region_get_by_index - find an afu region by index. - * @pdata: afu platform device's pdata. + * @fdata: afu feature dev data * @region_index: region index. * @pregion: ptr to region for result. * * Return: 0 on success, negative error code otherwise. */ -int afu_mmio_region_get_by_index(struct dfl_feature_platform_data *pdata, +int afu_mmio_region_get_by_index(struct dfl_feature_dev_data *fdata, u32 region_index, struct dfl_afu_mmio_region *pregion) { @@ -117,8 +119,8 @@ int afu_mmio_region_get_by_index(struct dfl_feature_platform_data *pdata, struct dfl_afu *afu; int ret = 0; - mutex_lock(&pdata->lock); - afu = dfl_fpga_pdata_get_private(pdata); + mutex_lock(&fdata->lock); + afu = dfl_fpga_fdata_get_private(fdata); region = get_region_by_index(afu, region_index); if (!region) { ret = -EINVAL; @@ -126,14 +128,14 @@ int afu_mmio_region_get_by_index(struct dfl_feature_platform_data *pdata, } *pregion = *region; exit: - mutex_unlock(&pdata->lock); + mutex_unlock(&fdata->lock); return ret; } /** * afu_mmio_region_get_by_offset - find an afu mmio region by offset and size * - * @pdata: afu platform device's pdata. + * @fdata: afu feature dev data * @offset: region offset from start of the device fd. * @size: region size. * @pregion: ptr to region for result. @@ -143,7 +145,7 @@ exit: * * Return: 0 on success, negative error code otherwise. 
*/ -int afu_mmio_region_get_by_offset(struct dfl_feature_platform_data *pdata, +int afu_mmio_region_get_by_offset(struct dfl_feature_dev_data *fdata, u64 offset, u64 size, struct dfl_afu_mmio_region *pregion) { @@ -151,8 +153,8 @@ int afu_mmio_region_get_by_offset(struct dfl_feature_platform_data *pdata, struct dfl_afu *afu; int ret = 0; - mutex_lock(&pdata->lock); - afu = dfl_fpga_pdata_get_private(pdata); + mutex_lock(&fdata->lock); + afu = dfl_fpga_fdata_get_private(fdata); for_each_region(region, afu) if (region->offset <= offset && region->offset + region->size >= offset + size) { @@ -161,6 +163,6 @@ int afu_mmio_region_get_by_offset(struct dfl_feature_platform_data *pdata, } ret = -EINVAL; exit: - mutex_unlock(&pdata->lock); + mutex_unlock(&fdata->lock); return ret; } diff --git a/drivers/fpga/dfl-afu.h b/drivers/fpga/dfl-afu.h index e5020e2b1f3d..03be4f0969c7 100644 --- a/drivers/fpga/dfl-afu.h +++ b/drivers/fpga/dfl-afu.h @@ -41,7 +41,7 @@ struct dfl_afu_mmio_region { }; /** - * struct fpga_afu_dma_region - afu DMA region data structure + * struct dfl_afu_dma_region - afu DMA region data structure * * @user_addr: region userspace virtual address. * @length: region length. @@ -67,7 +67,6 @@ struct dfl_afu_dma_region { * @regions: the mmio region linked list of this afu feature device. * @dma_regions: root of dma regions rb tree. * @num_umsgs: num of umsgs. - * @pdata: afu platform device's pdata. */ struct dfl_afu { u64 region_cur_offset; @@ -75,31 +74,29 @@ struct dfl_afu { u8 num_umsgs; struct list_head regions; struct rb_root dma_regions; - - struct dfl_feature_platform_data *pdata; }; -/* hold pdata->lock when call __afu_port_enable/disable */ -int __afu_port_enable(struct platform_device *pdev); -int __afu_port_disable(struct platform_device *pdev); +/* hold fdata->lock when call __afu_port_enable/disable */ +int __afu_port_enable(struct dfl_feature_dev_data *fdata); +int __afu_port_disable(struct dfl_feature_dev_data *fdata); -void afu_mmio_region_init(struct dfl_feature_platform_data *pdata); -int afu_mmio_region_add(struct dfl_feature_platform_data *pdata, +void afu_mmio_region_init(struct dfl_feature_dev_data *fdata); +int afu_mmio_region_add(struct dfl_feature_dev_data *fdata, u32 region_index, u64 region_size, u64 phys, u32 flags); -void afu_mmio_region_destroy(struct dfl_feature_platform_data *pdata); -int afu_mmio_region_get_by_index(struct dfl_feature_platform_data *pdata, +void afu_mmio_region_destroy(struct dfl_feature_dev_data *fdata); +int afu_mmio_region_get_by_index(struct dfl_feature_dev_data *fdata, u32 region_index, struct dfl_afu_mmio_region *pregion); -int afu_mmio_region_get_by_offset(struct dfl_feature_platform_data *pdata, +int afu_mmio_region_get_by_offset(struct dfl_feature_dev_data *fdata, u64 offset, u64 size, struct dfl_afu_mmio_region *pregion); -void afu_dma_region_init(struct dfl_feature_platform_data *pdata); -void afu_dma_region_destroy(struct dfl_feature_platform_data *pdata); -int afu_dma_map_region(struct dfl_feature_platform_data *pdata, +void afu_dma_region_init(struct dfl_feature_dev_data *fdata); +void afu_dma_region_destroy(struct dfl_feature_dev_data *fdata); +int afu_dma_map_region(struct dfl_feature_dev_data *fdata, u64 user_addr, u64 length, u64 *iova); -int afu_dma_unmap_region(struct dfl_feature_platform_data *pdata, u64 iova); +int afu_dma_unmap_region(struct dfl_feature_dev_data *fdata, u64 iova); struct dfl_afu_dma_region * -afu_dma_region_find(struct dfl_feature_platform_data *pdata, +afu_dma_region_find(struct 
dfl_feature_dev_data *fdata, u64 iova, u64 size); extern const struct dfl_feature_ops port_err_ops; diff --git a/drivers/fpga/dfl-fme-br.c b/drivers/fpga/dfl-fme-br.c index 3ff9f3a687ce..28b0f9d062ac 100644 --- a/drivers/fpga/dfl-fme-br.c +++ b/drivers/fpga/dfl-fme-br.c @@ -22,34 +22,34 @@ struct fme_br_priv { struct dfl_fme_br_pdata *pdata; struct dfl_fpga_port_ops *port_ops; - struct platform_device *port_pdev; + struct dfl_feature_dev_data *port_fdata; }; static int fme_bridge_enable_set(struct fpga_bridge *bridge, bool enable) { struct fme_br_priv *priv = bridge->priv; - struct platform_device *port_pdev; + struct dfl_feature_dev_data *port_fdata; struct dfl_fpga_port_ops *ops; - if (!priv->port_pdev) { - port_pdev = dfl_fpga_cdev_find_port(priv->pdata->cdev, - &priv->pdata->port_id, - dfl_fpga_check_port_id); - if (!port_pdev) + if (!priv->port_fdata) { + port_fdata = dfl_fpga_cdev_find_port_data(priv->pdata->cdev, + &priv->pdata->port_id, + dfl_fpga_check_port_id); + if (!port_fdata) return -ENODEV; - priv->port_pdev = port_pdev; + priv->port_fdata = port_fdata; } - if (priv->port_pdev && !priv->port_ops) { - ops = dfl_fpga_port_ops_get(priv->port_pdev); + if (priv->port_fdata && !priv->port_ops) { + ops = dfl_fpga_port_ops_get(priv->port_fdata); if (!ops || !ops->enable_set) return -ENOENT; priv->port_ops = ops; } - return priv->port_ops->enable_set(priv->port_pdev, enable); + return priv->port_ops->enable_set(priv->port_fdata, enable); } static const struct fpga_bridge_ops fme_bridge_ops = { @@ -68,37 +68,33 @@ static int fme_br_probe(struct platform_device *pdev) priv->pdata = dev_get_platdata(dev); - br = devm_fpga_bridge_create(dev, "DFL FPGA FME Bridge", - &fme_bridge_ops, priv); - if (!br) - return -ENOMEM; + br = fpga_bridge_register(dev, "DFL FPGA FME Bridge", + &fme_bridge_ops, priv); + if (IS_ERR(br)) + return PTR_ERR(br); platform_set_drvdata(pdev, br); - return fpga_bridge_register(br); + return 0; } -static int fme_br_remove(struct platform_device *pdev) +static void fme_br_remove(struct platform_device *pdev) { struct fpga_bridge *br = platform_get_drvdata(pdev); struct fme_br_priv *priv = br->priv; fpga_bridge_unregister(br); - if (priv->port_pdev) - put_device(&priv->port_pdev->dev); if (priv->port_ops) dfl_fpga_port_ops_put(priv->port_ops); - - return 0; } static struct platform_driver fme_br_driver = { - .driver = { - .name = DFL_FPGA_FME_BRIDGE, + .driver = { + .name = DFL_FPGA_FME_BRIDGE, }, - .probe = fme_br_probe, - .remove = fme_br_remove, + .probe = fme_br_probe, + .remove = fme_br_remove, }; module_platform_driver(fme_br_driver); diff --git a/drivers/fpga/dfl-fme-error.c b/drivers/fpga/dfl-fme-error.c index 51c2892ec06d..f00d949efe69 100644 --- a/drivers/fpga/dfl-fme-error.c +++ b/drivers/fpga/dfl-fme-error.c @@ -42,15 +42,15 @@ static ssize_t pcie0_errors_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct dfl_feature_platform_data *pdata = dev_get_platdata(dev); + struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev); void __iomem *base; u64 value; - base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR); + base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR); - mutex_lock(&pdata->lock); + mutex_lock(&fdata->lock); value = readq(base + PCIE0_ERROR); - mutex_unlock(&pdata->lock); + mutex_unlock(&fdata->lock); return sprintf(buf, "0x%llx\n", (unsigned long long)value); } @@ -59,7 +59,7 @@ static ssize_t pcie0_errors_store(struct device *dev, struct device_attribute *attr, const char *buf, 
size_t count) { - struct dfl_feature_platform_data *pdata = dev_get_platdata(dev); + struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev); void __iomem *base; int ret = 0; u64 v, val; @@ -67,9 +67,9 @@ static ssize_t pcie0_errors_store(struct device *dev, if (kstrtou64(buf, 0, &val)) return -EINVAL; - base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR); + base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR); - mutex_lock(&pdata->lock); + mutex_lock(&fdata->lock); writeq(GENMASK_ULL(63, 0), base + PCIE0_ERROR_MASK); v = readq(base + PCIE0_ERROR); @@ -79,7 +79,7 @@ static ssize_t pcie0_errors_store(struct device *dev, ret = -EINVAL; writeq(0ULL, base + PCIE0_ERROR_MASK); - mutex_unlock(&pdata->lock); + mutex_unlock(&fdata->lock); return ret ? ret : count; } static DEVICE_ATTR_RW(pcie0_errors); @@ -87,15 +87,15 @@ static DEVICE_ATTR_RW(pcie0_errors); static ssize_t pcie1_errors_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct dfl_feature_platform_data *pdata = dev_get_platdata(dev); + struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev); void __iomem *base; u64 value; - base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR); + base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR); - mutex_lock(&pdata->lock); + mutex_lock(&fdata->lock); value = readq(base + PCIE1_ERROR); - mutex_unlock(&pdata->lock); + mutex_unlock(&fdata->lock); return sprintf(buf, "0x%llx\n", (unsigned long long)value); } @@ -104,7 +104,7 @@ static ssize_t pcie1_errors_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - struct dfl_feature_platform_data *pdata = dev_get_platdata(dev); + struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev); void __iomem *base; int ret = 0; u64 v, val; @@ -112,9 +112,9 @@ static ssize_t pcie1_errors_store(struct device *dev, if (kstrtou64(buf, 0, &val)) return -EINVAL; - base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR); + base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR); - mutex_lock(&pdata->lock); + mutex_lock(&fdata->lock); writeq(GENMASK_ULL(63, 0), base + PCIE1_ERROR_MASK); v = readq(base + PCIE1_ERROR); @@ -124,7 +124,7 @@ static ssize_t pcie1_errors_store(struct device *dev, ret = -EINVAL; writeq(0ULL, base + PCIE1_ERROR_MASK); - mutex_unlock(&pdata->lock); + mutex_unlock(&fdata->lock); return ret ? 
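/*
 * The pcie0/pcie1 error attributes above follow one pattern: read or clear a
 * 64-bit error register under the feature lock and report it in hex. A
 * condensed sketch of that pattern with hypothetical names (demo_priv,
 * DEMO_ERR); the real driver additionally unmasks/masks the register around
 * the clear and only accepts the currently latched value.
 */
#include <linux/device.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/sysfs.h>

#define DEMO_ERR	0x20	/* hypothetical register offset */

struct demo_priv {
	struct mutex lock;
	void __iomem *base;
};

static ssize_t demo_errors_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct demo_priv *priv = dev_get_drvdata(dev);
	u64 value;

	mutex_lock(&priv->lock);
	value = readq(priv->base + DEMO_ERR);
	mutex_unlock(&priv->lock);

	return sysfs_emit(buf, "0x%llx\n", (unsigned long long)value);
}

static ssize_t demo_errors_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct demo_priv *priv = dev_get_drvdata(dev);
	u64 val;

	if (kstrtou64(buf, 0, &val))
		return -EINVAL;

	mutex_lock(&priv->lock);
	writeq(val, priv->base + DEMO_ERR);	/* write-1-to-clear style */
	mutex_unlock(&priv->lock);

	return count;
}
static DEVICE_ATTR_RW(demo_errors);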
ret : count; } static DEVICE_ATTR_RW(pcie1_errors); @@ -132,9 +132,10 @@ static DEVICE_ATTR_RW(pcie1_errors); static ssize_t nonfatal_errors_show(struct device *dev, struct device_attribute *attr, char *buf) { + struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev); void __iomem *base; - base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR); + base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR); return sprintf(buf, "0x%llx\n", (unsigned long long)readq(base + RAS_NONFAT_ERROR)); @@ -144,9 +145,10 @@ static DEVICE_ATTR_RO(nonfatal_errors); static ssize_t catfatal_errors_show(struct device *dev, struct device_attribute *attr, char *buf) { + struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev); void __iomem *base; - base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR); + base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR); return sprintf(buf, "0x%llx\n", (unsigned long long)readq(base + RAS_CATFAT_ERROR)); @@ -156,15 +158,15 @@ static DEVICE_ATTR_RO(catfatal_errors); static ssize_t inject_errors_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct dfl_feature_platform_data *pdata = dev_get_platdata(dev); + struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev); void __iomem *base; u64 v; - base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR); + base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR); - mutex_lock(&pdata->lock); + mutex_lock(&fdata->lock); v = readq(base + RAS_ERROR_INJECT); - mutex_unlock(&pdata->lock); + mutex_unlock(&fdata->lock); return sprintf(buf, "0x%llx\n", (unsigned long long)FIELD_GET(INJECT_ERROR_MASK, v)); @@ -174,7 +176,7 @@ static ssize_t inject_errors_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - struct dfl_feature_platform_data *pdata = dev_get_platdata(dev); + struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev); void __iomem *base; u8 inject_error; u64 v; @@ -185,14 +187,14 @@ static ssize_t inject_errors_store(struct device *dev, if (inject_error & ~INJECT_ERROR_MASK) return -EINVAL; - base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR); + base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR); - mutex_lock(&pdata->lock); + mutex_lock(&fdata->lock); v = readq(base + RAS_ERROR_INJECT); v &= ~INJECT_ERROR_MASK; v |= FIELD_PREP(INJECT_ERROR_MASK, inject_error); writeq(v, base + RAS_ERROR_INJECT); - mutex_unlock(&pdata->lock); + mutex_unlock(&fdata->lock); return count; } @@ -201,15 +203,15 @@ static DEVICE_ATTR_RW(inject_errors); static ssize_t fme_errors_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct dfl_feature_platform_data *pdata = dev_get_platdata(dev); + struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev); void __iomem *base; u64 value; - base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR); + base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR); - mutex_lock(&pdata->lock); + mutex_lock(&fdata->lock); value = readq(base + FME_ERROR); - mutex_unlock(&pdata->lock); + mutex_unlock(&fdata->lock); return sprintf(buf, "0x%llx\n", (unsigned long long)value); } @@ -218,7 +220,7 @@ static ssize_t fme_errors_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - struct dfl_feature_platform_data *pdata = dev_get_platdata(dev); + struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev); void __iomem *base; 
u64 v, val; int ret = 0; @@ -226,9 +228,9 @@ static ssize_t fme_errors_store(struct device *dev, if (kstrtou64(buf, 0, &val)) return -EINVAL; - base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR); + base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR); - mutex_lock(&pdata->lock); + mutex_lock(&fdata->lock); writeq(GENMASK_ULL(63, 0), base + FME_ERROR_MASK); v = readq(base + FME_ERROR); @@ -240,7 +242,7 @@ static ssize_t fme_errors_store(struct device *dev, /* Workaround: disable MBP_ERROR if feature revision is 0 */ writeq(dfl_feature_revision(base) ? 0ULL : MBP_ERROR, base + FME_ERROR_MASK); - mutex_unlock(&pdata->lock); + mutex_unlock(&fdata->lock); return ret ? ret : count; } static DEVICE_ATTR_RW(fme_errors); @@ -248,15 +250,15 @@ static DEVICE_ATTR_RW(fme_errors); static ssize_t first_error_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct dfl_feature_platform_data *pdata = dev_get_platdata(dev); + struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev); void __iomem *base; u64 value; - base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR); + base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR); - mutex_lock(&pdata->lock); + mutex_lock(&fdata->lock); value = readq(base + FME_FIRST_ERROR); - mutex_unlock(&pdata->lock); + mutex_unlock(&fdata->lock); return sprintf(buf, "0x%llx\n", (unsigned long long)value); } @@ -265,15 +267,15 @@ static DEVICE_ATTR_RO(first_error); static ssize_t next_error_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct dfl_feature_platform_data *pdata = dev_get_platdata(dev); + struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev); void __iomem *base; u64 value; - base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR); + base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR); - mutex_lock(&pdata->lock); + mutex_lock(&fdata->lock); value = readq(base + FME_NEXT_ERROR); - mutex_unlock(&pdata->lock); + mutex_unlock(&fdata->lock); return sprintf(buf, "0x%llx\n", (unsigned long long)value); } @@ -295,12 +297,14 @@ static umode_t fme_global_err_attrs_visible(struct kobject *kobj, struct attribute *attr, int n) { struct device *dev = kobj_to_dev(kobj); + struct dfl_feature_dev_data *fdata; + fdata = to_dfl_feature_dev_data(dev); /* * sysfs entries are visible only if related private feature is * enumerated. */ - if (!dfl_get_feature_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR)) + if (!dfl_get_feature_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR)) return 0; return attr->mode; @@ -314,12 +318,12 @@ const struct attribute_group fme_global_err_group = { static void fme_err_mask(struct device *dev, bool mask) { - struct dfl_feature_platform_data *pdata = dev_get_platdata(dev); + struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev); void __iomem *base; - base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR); + base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR); - mutex_lock(&pdata->lock); + mutex_lock(&fdata->lock); /* Workaround: keep MBP_ERROR always masked if revision is 0 */ if (dfl_feature_revision(base)) @@ -332,7 +336,7 @@ static void fme_err_mask(struct device *dev, bool mask) writeq(mask ? ERROR_MASK : 0, base + RAS_NONFAT_ERROR_MASK); writeq(mask ? 
ERROR_MASK : 0, base + RAS_CATFAT_ERROR_MASK); - mutex_unlock(&pdata->lock); + mutex_unlock(&fdata->lock); } static int fme_global_err_init(struct platform_device *pdev, diff --git a/drivers/fpga/dfl-fme-main.c b/drivers/fpga/dfl-fme-main.c index 77ea04d4edbe..8aca2fb20e87 100644 --- a/drivers/fpga/dfl-fme-main.c +++ b/drivers/fpga/dfl-fme-main.c @@ -19,6 +19,7 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/uaccess.h> +#include <linux/units.h> #include <linux/fpga-dfl.h> #include "dfl.h" @@ -27,10 +28,11 @@ static ssize_t ports_num_show(struct device *dev, struct device_attribute *attr, char *buf) { + struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev); void __iomem *base; u64 v; - base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER); + base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_HEADER); v = readq(base + FME_HDR_CAP); @@ -46,10 +48,11 @@ static DEVICE_ATTR_RO(ports_num); static ssize_t bitstream_id_show(struct device *dev, struct device_attribute *attr, char *buf) { + struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev); void __iomem *base; u64 v; - base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER); + base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_HEADER); v = readq(base + FME_HDR_BITSTREAM_ID); @@ -64,10 +67,11 @@ static DEVICE_ATTR_RO(bitstream_id); static ssize_t bitstream_metadata_show(struct device *dev, struct device_attribute *attr, char *buf) { + struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev); void __iomem *base; u64 v; - base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER); + base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_HEADER); v = readq(base + FME_HDR_BITSTREAM_MD); @@ -78,10 +82,11 @@ static DEVICE_ATTR_RO(bitstream_metadata); static ssize_t cache_size_show(struct device *dev, struct device_attribute *attr, char *buf) { + struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev); void __iomem *base; u64 v; - base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER); + base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_HEADER); v = readq(base + FME_HDR_CAP); @@ -93,10 +98,11 @@ static DEVICE_ATTR_RO(cache_size); static ssize_t fabric_version_show(struct device *dev, struct device_attribute *attr, char *buf) { + struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev); void __iomem *base; u64 v; - base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER); + base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_HEADER); v = readq(base + FME_HDR_CAP); @@ -108,10 +114,11 @@ static DEVICE_ATTR_RO(fabric_version); static ssize_t socket_id_show(struct device *dev, struct device_attribute *attr, char *buf) { + struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev); void __iomem *base; u64 v; - base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER); + base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_HEADER); v = readq(base + FME_HDR_CAP); @@ -134,10 +141,10 @@ static const struct attribute_group fme_hdr_group = { .attrs = fme_hdr_attrs, }; -static long fme_hdr_ioctl_release_port(struct dfl_feature_platform_data *pdata, +static long fme_hdr_ioctl_release_port(struct dfl_feature_dev_data *fdata, unsigned long arg) { - struct dfl_fpga_cdev *cdev = pdata->dfl_cdev; + struct dfl_fpga_cdev *cdev = fdata->dfl_cdev; int port_id; if (get_user(port_id, (int __user *)arg)) @@ -146,10 +153,10 @@ static long fme_hdr_ioctl_release_port(struct dfl_feature_platform_data *pdata, return 
dfl_fpga_cdev_release_port(cdev, port_id); } -static long fme_hdr_ioctl_assign_port(struct dfl_feature_platform_data *pdata, +static long fme_hdr_ioctl_assign_port(struct dfl_feature_dev_data *fdata, unsigned long arg) { - struct dfl_fpga_cdev *cdev = pdata->dfl_cdev; + struct dfl_fpga_cdev *cdev = fdata->dfl_cdev; int port_id; if (get_user(port_id, (int __user *)arg)) @@ -162,13 +169,13 @@ static long fme_hdr_ioctl(struct platform_device *pdev, struct dfl_feature *feature, unsigned int cmd, unsigned long arg) { - struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev); + struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev); switch (cmd) { case DFL_FPGA_FME_PORT_RELEASE: - return fme_hdr_ioctl_release_port(pdata, arg); + return fme_hdr_ioctl_release_port(fdata, arg); case DFL_FPGA_FME_PORT_ASSIGN: - return fme_hdr_ioctl_assign_port(pdata, arg); + return fme_hdr_ioctl_assign_port(fdata, arg); } return -ENODEV; @@ -231,19 +238,19 @@ static int thermal_hwmon_read(struct device *dev, enum hwmon_sensor_types type, switch (attr) { case hwmon_temp_input: v = readq(feature->ioaddr + FME_THERM_RDSENSOR_FMT1); - *val = (long)(FIELD_GET(FPGA_TEMPERATURE, v) * 1000); + *val = (long)(FIELD_GET(FPGA_TEMPERATURE, v) * MILLI); break; case hwmon_temp_max: v = readq(feature->ioaddr + FME_THERM_THRESHOLD); - *val = (long)(FIELD_GET(TEMP_THRESHOLD1, v) * 1000); + *val = (long)(FIELD_GET(TEMP_THRESHOLD1, v) * MILLI); break; case hwmon_temp_crit: v = readq(feature->ioaddr + FME_THERM_THRESHOLD); - *val = (long)(FIELD_GET(TEMP_THRESHOLD2, v) * 1000); + *val = (long)(FIELD_GET(TEMP_THRESHOLD2, v) * MILLI); break; case hwmon_temp_emergency: v = readq(feature->ioaddr + FME_THERM_THRESHOLD); - *val = (long)(FIELD_GET(TRIP_THRESHOLD, v) * 1000); + *val = (long)(FIELD_GET(TRIP_THRESHOLD, v) * MILLI); break; case hwmon_temp_max_alarm: v = readq(feature->ioaddr + FME_THERM_THRESHOLD); @@ -265,7 +272,7 @@ static const struct hwmon_ops thermal_hwmon_ops = { .read = thermal_hwmon_read, }; -static const struct hwmon_channel_info *thermal_hwmon_info[] = { +static const struct hwmon_channel_info * const thermal_hwmon_info[] = { HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_EMERGENCY | HWMON_T_MAX | HWMON_T_MAX_ALARM | HWMON_T_CRIT | HWMON_T_CRIT_ALARM), @@ -382,15 +389,15 @@ static int power_hwmon_read(struct device *dev, enum hwmon_sensor_types type, switch (attr) { case hwmon_power_input: v = readq(feature->ioaddr + FME_PWR_STATUS); - *val = (long)(FIELD_GET(PWR_CONSUMED, v) * 1000000); + *val = (long)(FIELD_GET(PWR_CONSUMED, v) * MICRO); break; case hwmon_power_max: v = readq(feature->ioaddr + FME_PWR_THRESHOLD); - *val = (long)(FIELD_GET(PWR_THRESHOLD1, v) * 1000000); + *val = (long)(FIELD_GET(PWR_THRESHOLD1, v) * MICRO); break; case hwmon_power_crit: v = readq(feature->ioaddr + FME_PWR_THRESHOLD); - *val = (long)(FIELD_GET(PWR_THRESHOLD2, v) * 1000000); + *val = (long)(FIELD_GET(PWR_THRESHOLD2, v) * MICRO); break; case hwmon_power_max_alarm: v = readq(feature->ioaddr + FME_PWR_THRESHOLD); @@ -410,14 +417,14 @@ static int power_hwmon_read(struct device *dev, enum hwmon_sensor_types type, static int power_hwmon_write(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel, long val) { - struct dfl_feature_platform_data *pdata = dev_get_platdata(dev->parent); + struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev->parent); struct dfl_feature *feature = dev_get_drvdata(dev); int ret = 0; u64 v; - val = clamp_val(val / 1000000, 0, PWR_THRESHOLD_MAX); + val = 
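/*
 * The hwmon hunks above replace bare 1000/1000000 scale factors with MILLI
 * and MICRO from <linux/units.h>: hwmon reports temperatures in millidegrees
 * Celsius and power in microwatts. A small sketch of the conversion; the
 * DEMO_TEMP field is a hypothetical stand-in for the register layout.
 */
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/units.h>

#define DEMO_TEMP	GENMASK_ULL(6, 0)	/* whole degrees Celsius */

static long demo_temp_millicelsius(u64 reg)
{
	/* e.g. 85 in the register becomes 85000 for hwmon_temp_input */
	return (long)(FIELD_GET(DEMO_TEMP, reg) * MILLI);
}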
clamp_val(val / MICRO, 0, PWR_THRESHOLD_MAX); - mutex_lock(&pdata->lock); + mutex_lock(&fdata->lock); switch (attr) { case hwmon_power_max: @@ -437,7 +444,7 @@ static int power_hwmon_write(struct device *dev, enum hwmon_sensor_types type, break; } - mutex_unlock(&pdata->lock); + mutex_unlock(&fdata->lock); return ret; } @@ -465,7 +472,7 @@ static const struct hwmon_ops power_hwmon_ops = { .write = power_hwmon_write, }; -static const struct hwmon_channel_info *power_hwmon_info[] = { +static const struct hwmon_channel_info * const power_hwmon_info[] = { HWMON_CHANNEL_INFO(power, HWMON_P_INPUT | HWMON_P_MAX | HWMON_P_MAX_ALARM | HWMON_P_CRIT | HWMON_P_CRIT_ALARM), @@ -588,7 +595,7 @@ static struct dfl_feature_driver fme_feature_drvs[] = { }, }; -static long fme_ioctl_check_extension(struct dfl_feature_platform_data *pdata, +static long fme_ioctl_check_extension(struct dfl_feature_dev_data *fdata, unsigned long arg) { /* No extension support for now */ @@ -597,49 +604,46 @@ static long fme_ioctl_check_extension(struct dfl_feature_platform_data *pdata, static int fme_open(struct inode *inode, struct file *filp) { - struct platform_device *fdev = dfl_fpga_inode_to_feature_dev(inode); - struct dfl_feature_platform_data *pdata = dev_get_platdata(&fdev->dev); + struct dfl_feature_dev_data *fdata = dfl_fpga_inode_to_feature_dev_data(inode); + struct platform_device *fdev = fdata->dev; int ret; - if (WARN_ON(!pdata)) - return -ENODEV; - - mutex_lock(&pdata->lock); - ret = dfl_feature_dev_use_begin(pdata, filp->f_flags & O_EXCL); + mutex_lock(&fdata->lock); + ret = dfl_feature_dev_use_begin(fdata, filp->f_flags & O_EXCL); if (!ret) { dev_dbg(&fdev->dev, "Device File Opened %d Times\n", - dfl_feature_dev_use_count(pdata)); - filp->private_data = pdata; + dfl_feature_dev_use_count(fdata)); + filp->private_data = fdata; } - mutex_unlock(&pdata->lock); + mutex_unlock(&fdata->lock); return ret; } static int fme_release(struct inode *inode, struct file *filp) { - struct dfl_feature_platform_data *pdata = filp->private_data; - struct platform_device *pdev = pdata->dev; + struct dfl_feature_dev_data *fdata = filp->private_data; + struct platform_device *pdev = fdata->dev; struct dfl_feature *feature; dev_dbg(&pdev->dev, "Device File Release\n"); - mutex_lock(&pdata->lock); - dfl_feature_dev_use_end(pdata); + mutex_lock(&fdata->lock); + dfl_feature_dev_use_end(fdata); - if (!dfl_feature_dev_use_count(pdata)) - dfl_fpga_dev_for_each_feature(pdata, feature) + if (!dfl_feature_dev_use_count(fdata)) + dfl_fpga_dev_for_each_feature(fdata, feature) dfl_fpga_set_irq_triggers(feature, 0, feature->nr_irqs, NULL); - mutex_unlock(&pdata->lock); + mutex_unlock(&fdata->lock); return 0; } static long fme_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { - struct dfl_feature_platform_data *pdata = filp->private_data; - struct platform_device *pdev = pdata->dev; + struct dfl_feature_dev_data *fdata = filp->private_data; + struct platform_device *pdev = fdata->dev; struct dfl_feature *f; long ret; @@ -649,7 +653,7 @@ static long fme_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) case DFL_FPGA_GET_API_VERSION: return DFL_FPGA_API_VERSION; case DFL_FPGA_CHECK_EXTENSION: - return fme_ioctl_check_extension(pdata, arg); + return fme_ioctl_check_extension(fdata, arg); default: /* * Let sub-feature's ioctl function to handle the cmd. 
@@ -657,7 +661,7 @@ static long fme_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) * handled in this sub feature, and returns 0 or other * error code if cmd is handled. */ - dfl_fpga_dev_for_each_feature(pdata, f) { + dfl_fpga_dev_for_each_feature(fdata, f) { if (f->ops && f->ops->ioctl) { ret = f->ops->ioctl(pdev, f, cmd, arg); if (ret != -ENODEV) @@ -671,29 +675,27 @@ static long fme_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) static int fme_dev_init(struct platform_device *pdev) { - struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev); + struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev); struct dfl_fme *fme; fme = devm_kzalloc(&pdev->dev, sizeof(*fme), GFP_KERNEL); if (!fme) return -ENOMEM; - fme->pdata = pdata; - - mutex_lock(&pdata->lock); - dfl_fpga_pdata_set_private(pdata, fme); - mutex_unlock(&pdata->lock); + mutex_lock(&fdata->lock); + dfl_fpga_fdata_set_private(fdata, fme); + mutex_unlock(&fdata->lock); return 0; } static void fme_dev_destroy(struct platform_device *pdev) { - struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev); + struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev); - mutex_lock(&pdata->lock); - dfl_fpga_pdata_set_private(pdata, NULL); - mutex_unlock(&pdata->lock); + mutex_lock(&fdata->lock); + dfl_fpga_fdata_set_private(fdata, NULL); + mutex_unlock(&fdata->lock); } static const struct file_operations fme_fops = { @@ -729,13 +731,11 @@ exit: return ret; } -static int fme_remove(struct platform_device *pdev) +static void fme_remove(struct platform_device *pdev) { dfl_fpga_dev_ops_unregister(pdev); dfl_fpga_dev_feature_uinit(pdev); fme_dev_destroy(pdev); - - return 0; } static const struct attribute_group *fme_dev_groups[] = { @@ -745,12 +745,12 @@ static const struct attribute_group *fme_dev_groups[] = { }; static struct platform_driver fme_driver = { - .driver = { - .name = DFL_FPGA_FEATURE_DEV_FME, + .driver = { + .name = DFL_FPGA_FEATURE_DEV_FME, .dev_groups = fme_dev_groups, }, - .probe = fme_probe, - .remove = fme_remove, + .probe = fme_probe, + .remove = fme_remove, }; module_platform_driver(fme_driver); diff --git a/drivers/fpga/dfl-fme-mgr.c b/drivers/fpga/dfl-fme-mgr.c index d5861d13b306..ab228d8837a0 100644 --- a/drivers/fpga/dfl-fme-mgr.c +++ b/drivers/fpga/dfl-fme-mgr.c @@ -252,11 +252,6 @@ static int fme_mgr_write_complete(struct fpga_manager *mgr, return 0; } -static enum fpga_mgr_states fme_mgr_state(struct fpga_manager *mgr) -{ - return FPGA_MGR_STATE_UNKNOWN; -} - static u64 fme_mgr_status(struct fpga_manager *mgr) { struct fme_mgr_priv *priv = mgr->priv; @@ -268,7 +263,6 @@ static const struct fpga_manager_ops fme_mgr_ops = { .write_init = fme_mgr_write_init, .write = fme_mgr_write, .write_complete = fme_mgr_write_complete, - .state = fme_mgr_state, .status = fme_mgr_status, }; @@ -282,11 +276,10 @@ static void fme_mgr_get_compat_id(void __iomem *fme_pr, static int fme_mgr_probe(struct platform_device *pdev) { struct dfl_fme_mgr_pdata *pdata = dev_get_platdata(&pdev->dev); - struct fpga_compat_id *compat_id; + struct fpga_manager_info info = { 0 }; struct device *dev = &pdev->dev; struct fme_mgr_priv *priv; struct fpga_manager *mgr; - struct resource *res; priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) @@ -296,26 +289,21 @@ static int fme_mgr_probe(struct platform_device *pdev) priv->ioaddr = pdata->ioaddr; if (!priv->ioaddr) { - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - priv->ioaddr = 
devm_ioremap_resource(dev, res); + priv->ioaddr = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(priv->ioaddr)) return PTR_ERR(priv->ioaddr); } - compat_id = devm_kzalloc(dev, sizeof(*compat_id), GFP_KERNEL); - if (!compat_id) + info.name = "DFL FME FPGA Manager"; + info.mops = &fme_mgr_ops; + info.priv = priv; + info.compat_id = devm_kzalloc(dev, sizeof(*info.compat_id), GFP_KERNEL); + if (!info.compat_id) return -ENOMEM; - fme_mgr_get_compat_id(priv->ioaddr, compat_id); - - mgr = devm_fpga_mgr_create(dev, "DFL FME FPGA Manager", - &fme_mgr_ops, priv); - if (!mgr) - return -ENOMEM; - - mgr->compat_id = compat_id; - - return devm_fpga_mgr_register(dev, mgr); + fme_mgr_get_compat_id(priv->ioaddr, info.compat_id); + mgr = devm_fpga_mgr_register_full(dev, &info); + return PTR_ERR_OR_ZERO(mgr); } static struct platform_driver fme_mgr_driver = { diff --git a/drivers/fpga/dfl-fme-perf.c b/drivers/fpga/dfl-fme-perf.c index 4299145ef347..7422d2bc6f37 100644 --- a/drivers/fpga/dfl-fme-perf.c +++ b/drivers/fpga/dfl-fme-perf.c @@ -141,7 +141,7 @@ * @fab_port_id: used to indicate current working mode of fabric counters. * @fab_lock: lock to protect fabric counters working mode. * @cpu: active CPU to which the PMU is bound for accesses. - * @cpuhp_node: node for CPU hotplug notifier link. + * @node: node for CPU hotplug notifier link. * @cpuhp_state: state for CPU hotplug notification; */ struct fme_perf_priv { @@ -953,6 +953,8 @@ static int fme_perf_offline_cpu(unsigned int cpu, struct hlist_node *node) return 0; priv->cpu = target; + perf_pmu_migrate_context(&priv->pmu, cpu, target); + return 0; } diff --git a/drivers/fpga/dfl-fme-pr.c b/drivers/fpga/dfl-fme-pr.c index 1194c0e850e0..b878b260af38 100644 --- a/drivers/fpga/dfl-fme-pr.c +++ b/drivers/fpga/dfl-fme-pr.c @@ -65,7 +65,7 @@ static struct fpga_region *dfl_fme_region_find(struct dfl_fme *fme, int port_id) static int fme_pr(struct platform_device *pdev, unsigned long arg) { - struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev); + struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev); void __user *argp = (void __user *)arg; struct dfl_fpga_fme_port_pr port_pr; struct fpga_image_info *info; @@ -87,8 +87,7 @@ static int fme_pr(struct platform_device *pdev, unsigned long arg) return -EINVAL; /* get fme header region */ - fme_hdr = dfl_get_feature_ioaddr_by_id(&pdev->dev, - FME_FEATURE_ID_HEADER); + fme_hdr = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_HEADER); /* check port id */ v = readq(fme_hdr + FME_HDR_CAP); @@ -123,8 +122,8 @@ static int fme_pr(struct platform_device *pdev, unsigned long arg) info->flags |= FPGA_MGR_PARTIAL_RECONFIG; - mutex_lock(&pdata->lock); - fme = dfl_fpga_pdata_get_private(pdata); + mutex_lock(&fdata->lock); + fme = dfl_fpga_fdata_get_private(fdata); /* fme device has been unregistered. */ if (!fme) { ret = -EINVAL; @@ -148,7 +147,7 @@ static int fme_pr(struct platform_device *pdev, unsigned long arg) /* * it allows userspace to reset the PR region's logic by disabling and - * reenabling the bridge to clear things out between accleration runs. + * reenabling the bridge to clear things out between acceleration runs. * so no need to hold the bridges after partial reconfiguration. 
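/*
 * The fme-mgr probe rework above uses the fpga_manager_info based
 * registration: fill the info struct and let the devres-managed helper do
 * register/unregister. A minimal sketch, assuming a caller that already has
 * a private struct and an ops table (demo-prefixed names are hypothetical).
 */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/fpga/fpga-mgr.h>

static int demo_register_mgr(struct device *dev, void *priv,
			     const struct fpga_manager_ops *ops)
{
	struct fpga_manager_info info = { 0 };
	struct fpga_manager *mgr;

	info.name = "Demo FPGA Manager";
	info.mops = ops;
	info.priv = priv;

	/* unregistered automatically when the parent device goes away */
	mgr = devm_fpga_mgr_register_full(dev, &info);
	return PTR_ERR_OR_ZERO(mgr);
}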
*/ if (region->get_bridges) @@ -156,7 +155,7 @@ static int fme_pr(struct platform_device *pdev, unsigned long arg) put_device(&region->dev); unlock_exit: - mutex_unlock(&pdata->lock); + mutex_unlock(&fdata->lock); free_exit: vfree(buf); return ret; @@ -164,16 +163,16 @@ free_exit: /** * dfl_fme_create_mgr - create fpga mgr platform device as child device - * - * @pdata: fme platform_device's pdata + * @fdata: fme feature dev data + * @feature: sub feature info * * Return: mgr platform device if successful, and error code otherwise. */ static struct platform_device * -dfl_fme_create_mgr(struct dfl_feature_platform_data *pdata, +dfl_fme_create_mgr(struct dfl_feature_dev_data *fdata, struct dfl_feature *feature) { - struct platform_device *mgr, *fme = pdata->dev; + struct platform_device *mgr, *fme = fdata->dev; struct dfl_fme_mgr_pdata mgr_pdata; int ret = -ENOMEM; @@ -209,11 +208,11 @@ create_mgr_err: /** * dfl_fme_destroy_mgr - destroy fpga mgr platform device - * @pdata: fme platform device's pdata + * @fdata: fme feature dev data */ -static void dfl_fme_destroy_mgr(struct dfl_feature_platform_data *pdata) +static void dfl_fme_destroy_mgr(struct dfl_feature_dev_data *fdata) { - struct dfl_fme *priv = dfl_fpga_pdata_get_private(pdata); + struct dfl_fme *priv = dfl_fpga_fdata_get_private(fdata); platform_device_unregister(priv->mgr); } @@ -221,15 +220,15 @@ static void dfl_fme_destroy_mgr(struct dfl_feature_platform_data *pdata) /** * dfl_fme_create_bridge - create fme fpga bridge platform device as child * - * @pdata: fme platform device's pdata + * @fdata: fme feature dev data * @port_id: port id for the bridge to be created. * * Return: bridge platform device if successful, and error code otherwise. */ static struct dfl_fme_bridge * -dfl_fme_create_bridge(struct dfl_feature_platform_data *pdata, int port_id) +dfl_fme_create_bridge(struct dfl_feature_dev_data *fdata, int port_id) { - struct device *dev = &pdata->dev->dev; + struct device *dev = &fdata->dev->dev; struct dfl_fme_br_pdata br_pdata; struct dfl_fme_bridge *fme_br; int ret = -ENOMEM; @@ -238,7 +237,7 @@ dfl_fme_create_bridge(struct dfl_feature_platform_data *pdata, int port_id) if (!fme_br) return ERR_PTR(ret); - br_pdata.cdev = pdata->dfl_cdev; + br_pdata.cdev = fdata->dfl_cdev; br_pdata.port_id = port_id; fme_br->br = platform_device_alloc(DFL_FPGA_FME_BRIDGE, @@ -273,12 +272,12 @@ static void dfl_fme_destroy_bridge(struct dfl_fme_bridge *fme_br) } /** - * dfl_fme_destroy_bridge - destroy all fpga bridge platform device - * @pdata: fme platform device's pdata + * dfl_fme_destroy_bridges - destroy all fpga bridge platform device + * @fdata: fme feature dev data */ -static void dfl_fme_destroy_bridges(struct dfl_feature_platform_data *pdata) +static void dfl_fme_destroy_bridges(struct dfl_feature_dev_data *fdata) { - struct dfl_fme *priv = dfl_fpga_pdata_get_private(pdata); + struct dfl_fme *priv = dfl_fpga_fdata_get_private(fdata); struct dfl_fme_bridge *fbridge, *tmp; list_for_each_entry_safe(fbridge, tmp, &priv->bridge_list, node) { @@ -290,7 +289,7 @@ static void dfl_fme_destroy_bridges(struct dfl_feature_platform_data *pdata) /** * dfl_fme_create_region - create fpga region platform device as child * - * @pdata: fme platform device's pdata + * @fdata: fme feature dev data * @mgr: mgr platform device needed for region * @br: br platform device needed for region * @port_id: port id @@ -298,12 +297,12 @@ static void dfl_fme_destroy_bridges(struct dfl_feature_platform_data *pdata) * Return: fme region if successful, and error 
code otherwise. */ static struct dfl_fme_region * -dfl_fme_create_region(struct dfl_feature_platform_data *pdata, +dfl_fme_create_region(struct dfl_feature_dev_data *fdata, struct platform_device *mgr, struct platform_device *br, int port_id) { struct dfl_fme_region_pdata region_pdata; - struct device *dev = &pdata->dev->dev; + struct device *dev = &fdata->dev->dev; struct dfl_fme_region *fme_region; int ret = -ENOMEM; @@ -353,11 +352,11 @@ static void dfl_fme_destroy_region(struct dfl_fme_region *fme_region) /** * dfl_fme_destroy_regions - destroy all fme regions - * @pdata: fme platform device's pdata + * @fdata: fme feature dev data */ -static void dfl_fme_destroy_regions(struct dfl_feature_platform_data *pdata) +static void dfl_fme_destroy_regions(struct dfl_feature_dev_data *fdata) { - struct dfl_fme *priv = dfl_fpga_pdata_get_private(pdata); + struct dfl_fme *priv = dfl_fpga_fdata_get_private(fdata); struct dfl_fme_region *fme_region, *tmp; list_for_each_entry_safe(fme_region, tmp, &priv->region_list, node) { @@ -369,7 +368,7 @@ static void dfl_fme_destroy_regions(struct dfl_feature_platform_data *pdata) static int pr_mgmt_init(struct platform_device *pdev, struct dfl_feature *feature) { - struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev); + struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev); struct dfl_fme_region *fme_region; struct dfl_fme_bridge *fme_br; struct platform_device *mgr; @@ -378,18 +377,17 @@ static int pr_mgmt_init(struct platform_device *pdev, int ret = -ENODEV, i = 0; u64 fme_cap, port_offset; - fme_hdr = dfl_get_feature_ioaddr_by_id(&pdev->dev, - FME_FEATURE_ID_HEADER); + fme_hdr = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_HEADER); - mutex_lock(&pdata->lock); - priv = dfl_fpga_pdata_get_private(pdata); + mutex_lock(&fdata->lock); + priv = dfl_fpga_fdata_get_private(fdata); /* Initialize the region and bridge sub device list */ INIT_LIST_HEAD(&priv->region_list); INIT_LIST_HEAD(&priv->bridge_list); /* Create fpga mgr platform device */ - mgr = dfl_fme_create_mgr(pdata, feature); + mgr = dfl_fme_create_mgr(fdata, feature); if (IS_ERR(mgr)) { dev_err(&pdev->dev, "fail to create fpga mgr pdev\n"); goto unlock; @@ -405,7 +403,7 @@ static int pr_mgmt_init(struct platform_device *pdev, continue; /* Create bridge for each port */ - fme_br = dfl_fme_create_bridge(pdata, i); + fme_br = dfl_fme_create_bridge(fdata, i); if (IS_ERR(fme_br)) { ret = PTR_ERR(fme_br); goto destroy_region; @@ -414,7 +412,7 @@ static int pr_mgmt_init(struct platform_device *pdev, list_add(&fme_br->node, &priv->bridge_list); /* Create region for each port */ - fme_region = dfl_fme_create_region(pdata, mgr, + fme_region = dfl_fme_create_region(fdata, mgr, fme_br->br, i); if (IS_ERR(fme_region)) { ret = PTR_ERR(fme_region); @@ -423,30 +421,30 @@ static int pr_mgmt_init(struct platform_device *pdev, list_add(&fme_region->node, &priv->region_list); } - mutex_unlock(&pdata->lock); + mutex_unlock(&fdata->lock); return 0; destroy_region: - dfl_fme_destroy_regions(pdata); - dfl_fme_destroy_bridges(pdata); - dfl_fme_destroy_mgr(pdata); + dfl_fme_destroy_regions(fdata); + dfl_fme_destroy_bridges(fdata); + dfl_fme_destroy_mgr(fdata); unlock: - mutex_unlock(&pdata->lock); + mutex_unlock(&fdata->lock); return ret; } static void pr_mgmt_uinit(struct platform_device *pdev, struct dfl_feature *feature) { - struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev); + struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev); - 
mutex_lock(&pdata->lock); + mutex_lock(&fdata->lock); - dfl_fme_destroy_regions(pdata); - dfl_fme_destroy_bridges(pdata); - dfl_fme_destroy_mgr(pdata); - mutex_unlock(&pdata->lock); + dfl_fme_destroy_regions(fdata); + dfl_fme_destroy_bridges(fdata); + dfl_fme_destroy_mgr(fdata); + mutex_unlock(&fdata->lock); } static long fme_pr_ioctl(struct platform_device *pdev, diff --git a/drivers/fpga/dfl-fme-pr.h b/drivers/fpga/dfl-fme-pr.h index 096a699089d3..761f80f63312 100644 --- a/drivers/fpga/dfl-fme-pr.h +++ b/drivers/fpga/dfl-fme-pr.h @@ -58,7 +58,7 @@ struct dfl_fme_bridge { }; /** - * struct dfl_fme_bridge_pdata - platform data for FME bridge platform device. + * struct dfl_fme_br_pdata - platform data for FME bridge platform device. * * @cdev: container device. * @port_id: port id. diff --git a/drivers/fpga/dfl-fme-region.c b/drivers/fpga/dfl-fme-region.c index 1eeb42af1012..c6cd63063c82 100644 --- a/drivers/fpga/dfl-fme-region.c +++ b/drivers/fpga/dfl-fme-region.c @@ -30,6 +30,7 @@ static int fme_region_get_bridges(struct fpga_region *region) static int fme_region_probe(struct platform_device *pdev) { struct dfl_fme_region_pdata *pdata = dev_get_platdata(&pdev->dev); + struct fpga_region_info info = { 0 }; struct device *dev = &pdev->dev; struct fpga_region *region; struct fpga_manager *mgr; @@ -39,20 +40,18 @@ static int fme_region_probe(struct platform_device *pdev) if (IS_ERR(mgr)) return -EPROBE_DEFER; - region = devm_fpga_region_create(dev, mgr, fme_region_get_bridges); - if (!region) { - ret = -ENOMEM; + info.mgr = mgr; + info.compat_id = mgr->compat_id; + info.get_bridges = fme_region_get_bridges; + info.priv = pdata; + region = fpga_region_register_full(dev, &info); + if (IS_ERR(region)) { + ret = PTR_ERR(region); goto eprobe_mgr_put; } - region->priv = pdata; - region->compat_id = mgr->compat_id; platform_set_drvdata(pdev, region); - ret = fpga_region_register(region); - if (ret) - goto eprobe_mgr_put; - dev_dbg(dev, "DFL FME FPGA Region probed\n"); return 0; @@ -62,23 +61,21 @@ eprobe_mgr_put: return ret; } -static int fme_region_remove(struct platform_device *pdev) +static void fme_region_remove(struct platform_device *pdev) { struct fpga_region *region = platform_get_drvdata(pdev); struct fpga_manager *mgr = region->mgr; fpga_region_unregister(region); fpga_mgr_put(mgr); - - return 0; } static struct platform_driver fme_region_driver = { - .driver = { - .name = DFL_FPGA_FME_REGION, + .driver = { + .name = DFL_FPGA_FME_REGION, }, - .probe = fme_region_probe, - .remove = fme_region_remove, + .probe = fme_region_probe, + .remove = fme_region_remove, }; module_platform_driver(fme_region_driver); diff --git a/drivers/fpga/dfl-fme.h b/drivers/fpga/dfl-fme.h index 4195dd68193e..a566dbc2b485 100644 --- a/drivers/fpga/dfl-fme.h +++ b/drivers/fpga/dfl-fme.h @@ -24,13 +24,11 @@ * @mgr: FME's FPGA manager platform device. * @region_list: linked list of FME's FPGA regions. * @bridge_list: linked list of FME's FPGA bridges. - * @pdata: fme platform device's pdata. 
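/*
 * Companion sketch for the dfl-fme-region probe change above: regions are
 * now described by fpga_region_info and created with
 * fpga_region_register_full(). demo_get_bridges is a hypothetical callback.
 */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/fpga/fpga-region.h>

static int demo_register_region(struct device *dev, struct fpga_manager *mgr,
				int (*demo_get_bridges)(struct fpga_region *))
{
	struct fpga_region_info info = { 0 };
	struct fpga_region *region;

	info.mgr = mgr;
	info.compat_id = mgr->compat_id;
	info.get_bridges = demo_get_bridges;

	region = fpga_region_register_full(dev, &info);
	if (IS_ERR(region))
		return PTR_ERR(region);

	dev_set_drvdata(dev, region);
	return 0;
}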
*/ struct dfl_fme { struct platform_device *mgr; struct list_head region_list; struct list_head bridge_list; - struct dfl_feature_platform_data *pdata; }; extern const struct dfl_feature_ops fme_pr_mgmt_ops; diff --git a/drivers/fpga/dfl-n3000-nios.c b/drivers/fpga/dfl-n3000-nios.c index 7a95366f6516..9ddf1d1d392f 100644 --- a/drivers/fpga/dfl-n3000-nios.c +++ b/drivers/fpga/dfl-n3000-nios.c @@ -461,7 +461,7 @@ static int n3000_nios_poll_stat_timeout(void __iomem *base, u64 *v) * We don't use the time based timeout here for performance. * * The regbus read/write is on the critical path of Intel PAC N3000 - * image programing. The time based timeout checking will add too much + * image programming. The time based timeout checking will add too much * overhead on it. Usually the state changes in 1 or 2 loops on the * test server, and we set 10000 times loop here for safety. */ diff --git a/drivers/fpga/dfl-pci.c b/drivers/fpga/dfl-pci.c index b44523ea8c91..602807d6afcc 100644 --- a/drivers/fpga/dfl-pci.c +++ b/drivers/fpga/dfl-pci.c @@ -15,12 +15,12 @@ */ #include <linux/pci.h> +#include <linux/dma-mapping.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/stddef.h> #include <linux/errno.h> -#include <linux/aer.h> #include "dfl.h" @@ -39,14 +39,6 @@ struct cci_drvdata { struct dfl_fpga_cdev *cdev; /* container device */ }; -static void __iomem *cci_pci_ioremap_bar0(struct pci_dev *pcidev) -{ - if (pcim_iomap_regions(pcidev, BIT(0), DRV_NAME)) - return NULL; - - return pcim_iomap_table(pcidev)[0]; -} - static int cci_pci_alloc_irq(struct pci_dev *pcidev) { int ret, nvec = pci_msix_vec_count(pcidev); @@ -74,11 +66,21 @@ static void cci_pci_free_irq(struct pci_dev *pcidev) #define PCIE_DEVICE_ID_PF_DSC_1_X 0x09C4 #define PCIE_DEVICE_ID_INTEL_PAC_N3000 0x0B30 #define PCIE_DEVICE_ID_INTEL_PAC_D5005 0x0B2B +#define PCIE_DEVICE_ID_SILICOM_PAC_N5010 0x1000 +#define PCIE_DEVICE_ID_SILICOM_PAC_N5011 0x1001 +#define PCIE_DEVICE_ID_INTEL_DFL 0xbcce +/* PCI Subdevice ID for PCIE_DEVICE_ID_INTEL_DFL */ +#define PCIE_SUBDEVICE_ID_INTEL_D5005 0x138d +#define PCIE_SUBDEVICE_ID_INTEL_N6000 0x1770 +#define PCIE_SUBDEVICE_ID_INTEL_N6001 0x1771 +#define PCIE_SUBDEVICE_ID_INTEL_C6100 0x17d4 + /* VF Device */ #define PCIE_DEVICE_ID_VF_INT_5_X 0xBCBF #define PCIE_DEVICE_ID_VF_INT_6_X 0xBCC1 #define PCIE_DEVICE_ID_VF_DSC_1_X 0x09C5 #define PCIE_DEVICE_ID_INTEL_PAC_D5005_VF 0x0B2C +#define PCIE_DEVICE_ID_INTEL_DFL_VF 0xbccf static struct pci_device_id cci_pcie_id_tbl[] = { {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_PF_INT_5_X),}, @@ -90,6 +92,22 @@ static struct pci_device_id cci_pcie_id_tbl[] = { {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_PAC_N3000),}, {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_PAC_D5005),}, {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_PAC_D5005_VF),}, + {PCI_DEVICE(PCI_VENDOR_ID_SILICOM_DENMARK, PCIE_DEVICE_ID_SILICOM_PAC_N5010),}, + {PCI_DEVICE(PCI_VENDOR_ID_SILICOM_DENMARK, PCIE_DEVICE_ID_SILICOM_PAC_N5011),}, + {PCI_DEVICE_SUB(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_DFL, + PCI_VENDOR_ID_INTEL, PCIE_SUBDEVICE_ID_INTEL_D5005),}, + {PCI_DEVICE_SUB(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_DFL, + PCI_VENDOR_ID_INTEL, PCIE_SUBDEVICE_ID_INTEL_N6000),}, + {PCI_DEVICE_SUB(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_DFL_VF, + PCI_VENDOR_ID_INTEL, PCIE_SUBDEVICE_ID_INTEL_N6000),}, + {PCI_DEVICE_SUB(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_DFL, + PCI_VENDOR_ID_INTEL, PCIE_SUBDEVICE_ID_INTEL_N6001),}, + 
{PCI_DEVICE_SUB(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_DFL_VF, + PCI_VENDOR_ID_INTEL, PCIE_SUBDEVICE_ID_INTEL_N6001),}, + {PCI_DEVICE_SUB(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_DFL, + PCI_VENDOR_ID_INTEL, PCIE_SUBDEVICE_ID_INTEL_C6100),}, + {PCI_DEVICE_SUB(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_DFL_VF, + PCI_VENDOR_ID_INTEL, PCIE_SUBDEVICE_ID_INTEL_C6100),}, {0,} }; MODULE_DEVICE_TABLE(pci, cci_pcie_id_tbl); @@ -133,19 +151,12 @@ static int *cci_pci_create_irq_table(struct pci_dev *pcidev, unsigned int nvec) static int find_dfls_by_vsec(struct pci_dev *pcidev, struct dfl_fpga_enum_info *info) { - u32 bir, offset, vndr_hdr, dfl_cnt, dfl_res; - int dfl_res_off, i, bars, voff = 0; + u32 bir, offset, dfl_cnt, dfl_res; + int dfl_res_off, i, bars, voff; resource_size_t start, len; - while ((voff = pci_find_next_ext_capability(pcidev, voff, PCI_EXT_CAP_ID_VNDR))) { - vndr_hdr = 0; - pci_read_config_dword(pcidev, voff + PCI_VNDR_HEADER, &vndr_hdr); - - if (PCI_VNDR_HEADER_ID(vndr_hdr) == PCI_VSEC_ID_INTEL_DFLS && - pcidev->vendor == PCI_VENDOR_ID_INTEL) - break; - } - + voff = pci_find_vsec_capability(pcidev, PCI_VENDOR_ID_INTEL, + PCI_VSEC_ID_INTEL_DFLS); if (!voff) { dev_dbg(&pcidev->dev, "%s no DFL VSEC found\n", __func__); return -ENODEV; @@ -216,9 +227,9 @@ static int find_dfls_by_default(struct pci_dev *pcidev, u64 v; /* start to find Device Feature List from Bar 0 */ - base = cci_pci_ioremap_bar0(pcidev); - if (!base) - return -ENOMEM; + base = pcim_iomap_region(pcidev, 0, DRV_NAME); + if (IS_ERR(base)) + return PTR_ERR(base); /* * PF device has FME and Ports/AFUs, and VF device only has one @@ -253,6 +264,15 @@ static int find_dfls_by_default(struct pci_dev *pcidev, */ bar = FIELD_GET(FME_PORT_OFST_BAR_ID, v); offset = FIELD_GET(FME_PORT_OFST_DFH_OFST, v); + if (bar == FME_PORT_OFST_BAR_SKIP) { + continue; + } else if (bar >= PCI_STD_NUM_BARS) { + dev_err(&pcidev->dev, "bad BAR %d for port %d\n", + bar, i); + ret = -EINVAL; + break; + } + start = pci_resource_start(pcidev, bar) + offset; len = pci_resource_len(pcidev, bar) - offset; @@ -268,7 +288,7 @@ static int find_dfls_by_default(struct pci_dev *pcidev, } /* release I/O mappings for next step enumeration */ - pcim_iounmap_regions(pcidev, BIT(0)); + pcim_iounmap_region(pcidev, 0); return ret; } @@ -343,41 +363,29 @@ int cci_pci_probe(struct pci_dev *pcidev, const struct pci_device_id *pcidevid) return ret; } - ret = pci_enable_pcie_error_reporting(pcidev); - if (ret && ret != -EINVAL) - dev_info(&pcidev->dev, "PCIE AER unavailable %d.\n", ret); - pci_set_master(pcidev); - if (!pci_set_dma_mask(pcidev, DMA_BIT_MASK(64))) { - ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64)); - if (ret) - goto disable_error_report_exit; - } else if (!pci_set_dma_mask(pcidev, DMA_BIT_MASK(32))) { - ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32)); - if (ret) - goto disable_error_report_exit; - } else { - ret = -EIO; + ret = dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(64)); + if (ret) + ret = dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(32)); + if (ret) { dev_err(&pcidev->dev, "No suitable DMA support available.\n"); - goto disable_error_report_exit; + return ret; } ret = cci_init_drvdata(pcidev); if (ret) { dev_err(&pcidev->dev, "Fail to init drvdata %d.\n", ret); - goto disable_error_report_exit; + return ret; } ret = cci_enumerate_feature_devs(pcidev); - if (!ret) + if (ret) { + dev_err(&pcidev->dev, "enumeration failure %d.\n", ret); return ret; + } - dev_err(&pcidev->dev, "enumeration failure %d.\n", ret); 
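/*
 * The dfl-pci cleanups above lean on two core helpers:
 * pci_find_vsec_capability() replaces the open-coded walk over
 * PCI_EXT_CAP_ID_VNDR, and dma_set_mask_and_coherent() replaces the paired
 * pci_set_dma_mask()/pci_set_consistent_dma_mask() calls. A condensed
 * sketch; DEMO_DFL_VSEC_ID is a hypothetical capability ID.
 */
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/pci.h>

#define DEMO_DFL_VSEC_ID	0x43	/* hypothetical vendor-specific cap ID */

static int demo_pci_setup(struct pci_dev *pcidev)
{
	u16 voff;
	int ret;

	/* one call instead of iterating extended capabilities by hand */
	voff = pci_find_vsec_capability(pcidev, PCI_VENDOR_ID_INTEL,
					DEMO_DFL_VSEC_ID);
	if (!voff)
		return -ENODEV;

	/* prefer 64-bit DMA, fall back to 32-bit */
	ret = dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(64));
	if (ret)
		ret = dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(32));

	return ret;
}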
- -disable_error_report_exit: - pci_disable_pcie_error_reporting(pcidev); - return ret; + return 0; } static int cci_pci_sriov_configure(struct pci_dev *pcidev, int num_vfs) @@ -421,7 +429,6 @@ static void cci_pci_remove(struct pci_dev *pcidev) cci_pci_sriov_configure(pcidev, 0); cci_remove_feature_devs(pcidev); - pci_disable_pcie_error_reporting(pcidev); } static struct pci_driver cci_pci_driver = { diff --git a/drivers/fpga/dfl.c b/drivers/fpga/dfl.c index 511b20ff35a3..7022657243c0 100644 --- a/drivers/fpga/dfl.c +++ b/drivers/fpga/dfl.c @@ -13,6 +13,7 @@ #include <linux/dfl.h> #include <linux/fpga-dfl.h> #include <linux/module.h> +#include <linux/overflow.h> #include <linux/uaccess.h> #include "dfl.h" @@ -45,7 +46,7 @@ static const char *dfl_pdata_key_strings[DFL_ID_MAX] = { }; /** - * dfl_dev_info - dfl feature device information. + * struct dfl_dev_info - dfl feature device information. * @name: name string of the feature platform device. * @dfh_id: id value in Device Feature Header (DFH) register by DFL spec. * @id: idr id of the feature dev. @@ -67,7 +68,7 @@ static struct dfl_dev_info dfl_devs[] = { }; /** - * dfl_chardev_info - chardev information of dfl feature device + * struct dfl_chardev_info - chardev information of dfl feature device * @name: nmae string of the char device. * @devt: devt of the char device. */ @@ -118,17 +119,6 @@ static void dfl_id_free(enum dfl_id_type type, int id) mutex_unlock(&dfl_id_mutex); } -static enum dfl_id_type feature_dev_id_type(struct platform_device *pdev) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(dfl_devs); i++) - if (!strcmp(dfl_devs[i].name, pdev->name)) - return i; - - return DFL_ID_MAX; -} - static enum dfl_id_type dfh_id_to_type(u16 id) { int i; @@ -155,12 +145,12 @@ static LIST_HEAD(dfl_port_ops_list); /** * dfl_fpga_port_ops_get - get matched port ops from the global list - * @pdev: platform device to match with associated port ops. + * @fdata: feature dev data to match with associated port ops. * Return: matched port ops on success, NULL otherwise. * * Please note that must dfl_fpga_port_ops_put after use the port_ops. */ -struct dfl_fpga_port_ops *dfl_fpga_port_ops_get(struct platform_device *pdev) +struct dfl_fpga_port_ops *dfl_fpga_port_ops_get(struct dfl_feature_dev_data *fdata) { struct dfl_fpga_port_ops *ops = NULL; @@ -170,7 +160,7 @@ struct dfl_fpga_port_ops *dfl_fpga_port_ops_get(struct platform_device *pdev) list_for_each_entry(ops, &dfl_port_ops_list, node) { /* match port_ops using the name of platform device */ - if (!strcmp(pdev->name, ops->name)) { + if (!strcmp(fdata->pdev_name, ops->name)) { if (!try_module_get(ops->owner)) ops = NULL; goto done; @@ -221,27 +211,26 @@ EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_del); /** * dfl_fpga_check_port_id - check the port id - * @pdev: port platform device. + * @fdata: port feature dev data. * @pport_id: port id to compare. * * Return: 1 if port device matches with given port id, otherwise 0. 
*/ -int dfl_fpga_check_port_id(struct platform_device *pdev, void *pport_id) +int dfl_fpga_check_port_id(struct dfl_feature_dev_data *fdata, void *pport_id) { - struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev); struct dfl_fpga_port_ops *port_ops; - if (pdata->id != FEATURE_DEV_ID_UNUSED) - return pdata->id == *(int *)pport_id; + if (fdata->id != FEATURE_DEV_ID_UNUSED) + return fdata->id == *(int *)pport_id; - port_ops = dfl_fpga_port_ops_get(pdev); + port_ops = dfl_fpga_port_ops_get(fdata); if (!port_ops || !port_ops->get_id) return 0; - pdata->id = port_ops->get_id(pdev); + fdata->id = port_ops->get_id(fdata); dfl_fpga_port_ops_put(port_ops); - return pdata->id == *(int *)pport_id; + return fdata->id == *(int *)pport_id; } EXPORT_SYMBOL_GPL(dfl_fpga_check_port_id); @@ -256,10 +245,10 @@ dfl_match_one_device(const struct dfl_device_id *id, struct dfl_device *ddev) return NULL; } -static int dfl_bus_match(struct device *dev, struct device_driver *drv) +static int dfl_bus_match(struct device *dev, const struct device_driver *drv) { struct dfl_device *ddev = to_dfl_dev(dev); - struct dfl_driver *ddrv = to_dfl_drv(drv); + const struct dfl_driver *ddrv = to_dfl_drv(drv); const struct dfl_device_id *id_entry; id_entry = ddrv->id_table; @@ -284,20 +273,18 @@ static int dfl_bus_probe(struct device *dev) return ddrv->probe(ddev); } -static int dfl_bus_remove(struct device *dev) +static void dfl_bus_remove(struct device *dev) { struct dfl_driver *ddrv = to_dfl_drv(dev->driver); struct dfl_device *ddev = to_dfl_dev(dev); if (ddrv->remove) ddrv->remove(ddev); - - return 0; } -static int dfl_bus_uevent(struct device *dev, struct kobj_uevent_env *env) +static int dfl_bus_uevent(const struct device *dev, struct kobj_uevent_env *env) { - struct dfl_device *ddev = to_dfl_dev(dev); + const struct dfl_device *ddev = to_dfl_dev(dev); return add_uevent_var(env, "MODALIAS=dfl:t%04Xf%04X", ddev->type, ddev->feature_id); @@ -328,7 +315,7 @@ static struct attribute *dfl_dev_attrs[] = { }; ATTRIBUTE_GROUPS(dfl_dev); -static struct bus_type dfl_bus_type = { +static const struct bus_type dfl_bus_type = { .name = "dfl", .match = dfl_bus_match, .probe = dfl_bus_probe, @@ -344,16 +331,18 @@ static void release_dfl_dev(struct device *dev) if (ddev->mmio_res.parent) release_resource(&ddev->mmio_res); - ida_simple_remove(&dfl_device_ida, ddev->id); + kfree(ddev->params); + + ida_free(&dfl_device_ida, ddev->id); kfree(ddev->irqs); kfree(ddev); } static struct dfl_device * -dfl_dev_add(struct dfl_feature_platform_data *pdata, +dfl_dev_add(struct dfl_feature_dev_data *fdata, struct dfl_feature *feature) { - struct platform_device *pdev = pdata->dev; + struct platform_device *pdev = fdata->dev; struct resource *parent_res; struct dfl_device *ddev; int id, i, ret; @@ -362,7 +351,7 @@ dfl_dev_add(struct dfl_feature_platform_data *pdata, if (!ddev) return ERR_PTR(-ENOMEM); - id = ida_simple_get(&dfl_device_ida, 0, 0, GFP_KERNEL); + id = ida_alloc(&dfl_device_ida, GFP_KERNEL); if (id < 0) { dev_err(&pdev->dev, "unable to get id\n"); kfree(ddev); @@ -379,9 +368,19 @@ dfl_dev_add(struct dfl_feature_platform_data *pdata, if (ret) goto put_dev; - ddev->type = feature_dev_id_type(pdev); + ddev->type = fdata->type; ddev->feature_id = feature->id; - ddev->cdev = pdata->dfl_cdev; + ddev->revision = feature->revision; + ddev->dfh_version = feature->dfh_version; + ddev->cdev = fdata->dfl_cdev; + if (feature->param_size) { + ddev->params = kmemdup(feature->params, feature->param_size, GFP_KERNEL); + if (!ddev->params) { 
+ ret = -ENOMEM; + goto put_dev; + } + ddev->param_size = feature->param_size; + } /* add mmio resource */ parent_res = &pdev->resource[feature->resource_index]; @@ -424,11 +423,11 @@ put_dev: return ERR_PTR(ret); } -static void dfl_devs_remove(struct dfl_feature_platform_data *pdata) +static void dfl_devs_remove(struct dfl_feature_dev_data *fdata) { struct dfl_feature *feature; - dfl_fpga_dev_for_each_feature(pdata, feature) { + dfl_fpga_dev_for_each_feature(fdata, feature) { if (feature->ddev) { device_unregister(&feature->ddev->dev); feature->ddev = NULL; @@ -436,13 +435,13 @@ static void dfl_devs_remove(struct dfl_feature_platform_data *pdata) } } -static int dfl_devs_add(struct dfl_feature_platform_data *pdata) +static int dfl_devs_add(struct dfl_feature_dev_data *fdata) { struct dfl_feature *feature; struct dfl_device *ddev; int ret; - dfl_fpga_dev_for_each_feature(pdata, feature) { + dfl_fpga_dev_for_each_feature(fdata, feature) { if (feature->ioaddr) continue; @@ -451,7 +450,7 @@ static int dfl_devs_add(struct dfl_feature_platform_data *pdata) goto err; } - ddev = dfl_dev_add(pdata, feature); + ddev = dfl_dev_add(fdata, feature); if (IS_ERR(ddev)) { ret = PTR_ERR(ddev); goto err; @@ -463,7 +462,7 @@ static int dfl_devs_add(struct dfl_feature_platform_data *pdata) return 0; err: - dfl_devs_remove(pdata); + dfl_devs_remove(fdata); return ret; } @@ -493,12 +492,12 @@ EXPORT_SYMBOL(dfl_driver_unregister); */ void dfl_fpga_dev_feature_uinit(struct platform_device *pdev) { - struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev); + struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev); struct dfl_feature *feature; - dfl_devs_remove(pdata); + dfl_devs_remove(fdata); - dfl_fpga_dev_for_each_feature(pdata, feature) { + dfl_fpga_dev_for_each_feature(fdata, feature) { if (feature->ops) { if (feature->ops->uinit) feature->ops->uinit(pdev, feature); @@ -509,7 +508,6 @@ void dfl_fpga_dev_feature_uinit(struct platform_device *pdev) EXPORT_SYMBOL_GPL(dfl_fpga_dev_feature_uinit); static int dfl_feature_instance_init(struct platform_device *pdev, - struct dfl_feature_platform_data *pdata, struct dfl_feature *feature, struct dfl_feature_driver *drv) { @@ -568,16 +566,15 @@ static bool dfl_feature_drv_match(struct dfl_feature *feature, int dfl_fpga_dev_feature_init(struct platform_device *pdev, struct dfl_feature_driver *feature_drvs) { - struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev); + struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev); struct dfl_feature_driver *drv = feature_drvs; struct dfl_feature *feature; int ret; while (drv->ops) { - dfl_fpga_dev_for_each_feature(pdata, feature) { + dfl_fpga_dev_for_each_feature(fdata, feature) { if (dfl_feature_drv_match(feature, drv)) { - ret = dfl_feature_instance_init(pdev, pdata, - feature, drv); + ret = dfl_feature_instance_init(pdev, feature, drv); if (ret) goto exit; } @@ -585,7 +582,7 @@ int dfl_fpga_dev_feature_init(struct platform_device *pdev, drv++; } - ret = dfl_devs_add(pdata); + ret = dfl_devs_add(fdata); if (ret) goto exit; @@ -684,7 +681,7 @@ EXPORT_SYMBOL_GPL(dfl_fpga_dev_ops_unregister); * @nr_irqs: number of irqs for all feature devices. * @irq_table: Linux IRQ numbers for all irqs, indexed by local irq index of * this device. - * @feature_dev: current feature device. + * @type: the current FIU type. * @ioaddr: header register region address of current FIU in enumeration. * @start: register resource start of current FIU. 
* @len: max register resource length of current FIU. @@ -697,7 +694,7 @@ struct build_feature_devs_info { unsigned int nr_irqs; int *irq_table; - struct platform_device *feature_dev; + enum dfl_id_type type; void __iomem *ioaddr; resource_size_t start; resource_size_t len; @@ -709,65 +706,85 @@ struct build_feature_devs_info { * struct dfl_feature_info - sub feature info collected during feature dev build * * @fid: id of this sub feature. + * @revision: revision of this sub feature + * @dfh_version: version of Device Feature Header (DFH) * @mmio_res: mmio resource of this sub feature. * @ioaddr: mapped base address of mmio resource. * @node: node in sub_features linked list. * @irq_base: start of irq index in this sub feature. * @nr_irqs: number of irqs of this sub feature. + * @param_size: size DFH parameters. + * @params: DFH parameter data. */ struct dfl_feature_info { u16 fid; + u8 revision; + u8 dfh_version; struct resource mmio_res; void __iomem *ioaddr; struct list_head node; unsigned int irq_base; unsigned int nr_irqs; + unsigned int param_size; + u64 params[]; }; -static void dfl_fpga_cdev_add_port_dev(struct dfl_fpga_cdev *cdev, - struct platform_device *port) +static void dfl_fpga_cdev_add_port_data(struct dfl_fpga_cdev *cdev, + struct dfl_feature_dev_data *fdata) { - struct dfl_feature_platform_data *pdata = dev_get_platdata(&port->dev); - mutex_lock(&cdev->lock); - list_add(&pdata->node, &cdev->port_dev_list); - get_device(&pdata->dev->dev); + list_add(&fdata->node, &cdev->port_dev_list); mutex_unlock(&cdev->lock); } -/* - * register current feature device, it is called when we need to switch to - * another feature parsing or we have parsed all features on given device - * feature list. - */ -static int build_info_commit_dev(struct build_feature_devs_info *binfo) +static void dfl_id_free_action(void *arg) +{ + struct dfl_feature_dev_data *fdata = arg; + + dfl_id_free(fdata->type, fdata->pdev_id); +} + +static struct dfl_feature_dev_data * +binfo_create_feature_dev_data(struct build_feature_devs_info *binfo) { - struct platform_device *fdev = binfo->feature_dev; - struct dfl_feature_platform_data *pdata; + enum dfl_id_type type = binfo->type; struct dfl_feature_info *finfo, *p; - enum dfl_id_type type; + struct dfl_feature_dev_data *fdata; int ret, index = 0, res_idx = 0; - type = feature_dev_id_type(fdev); if (WARN_ON_ONCE(type >= DFL_ID_MAX)) - return -EINVAL; + return ERR_PTR(-EINVAL); - /* - * we do not need to care for the memory which is associated with - * the platform device. After calling platform_device_unregister(), - * it will be automatically freed by device's release() callback, - * platform_device_release(). 
- */ - pdata = kzalloc(struct_size(pdata, features, binfo->feature_num), GFP_KERNEL); - if (!pdata) - return -ENOMEM; + fdata = devm_kzalloc(binfo->dev, sizeof(*fdata), GFP_KERNEL); + if (!fdata) + return ERR_PTR(-ENOMEM); - pdata->dev = fdev; - pdata->num = binfo->feature_num; - pdata->dfl_cdev = binfo->cdev; - pdata->id = FEATURE_DEV_ID_UNUSED; - mutex_init(&pdata->lock); - lockdep_set_class_and_name(&pdata->lock, &dfl_pdata_keys[type], + fdata->features = devm_kcalloc(binfo->dev, binfo->feature_num, + sizeof(*fdata->features), GFP_KERNEL); + if (!fdata->features) + return ERR_PTR(-ENOMEM); + + fdata->resources = devm_kcalloc(binfo->dev, binfo->feature_num, + sizeof(*fdata->resources), GFP_KERNEL); + if (!fdata->resources) + return ERR_PTR(-ENOMEM); + + fdata->type = type; + + fdata->pdev_id = dfl_id_alloc(type, binfo->dev); + if (fdata->pdev_id < 0) + return ERR_PTR(fdata->pdev_id); + + ret = devm_add_action_or_reset(binfo->dev, dfl_id_free_action, fdata); + if (ret) + return ERR_PTR(ret); + + fdata->pdev_name = dfl_devs[type].name; + fdata->num = binfo->feature_num; + fdata->dfl_cdev = binfo->cdev; + fdata->id = FEATURE_DEV_ID_UNUSED; + mutex_init(&fdata->lock); + lockdep_set_class_and_name(&fdata->lock, &dfl_pdata_keys[type], dfl_pdata_key_strings[type]); /* @@ -776,27 +793,28 @@ static int build_info_commit_dev(struct build_feature_devs_info *binfo) * works properly for port device. * and it should always be 0 for fme device. */ - WARN_ON(pdata->disable_count); - - fdev->dev.platform_data = pdata; - - /* each sub feature has one MMIO resource */ - fdev->num_resources = binfo->feature_num; - fdev->resource = kcalloc(binfo->feature_num, sizeof(*fdev->resource), - GFP_KERNEL); - if (!fdev->resource) - return -ENOMEM; + WARN_ON(fdata->disable_count); /* fill features and resource information for feature dev */ list_for_each_entry_safe(finfo, p, &binfo->sub_features, node) { - struct dfl_feature *feature = &pdata->features[index++]; + struct dfl_feature *feature = &fdata->features[index++]; struct dfl_feature_irq_ctx *ctx; unsigned int i; /* save resource information for each feature */ - feature->dev = fdev; feature->id = finfo->fid; + feature->revision = finfo->revision; + feature->dfh_version = finfo->dfh_version; + + if (finfo->param_size) { + feature->params = devm_kmemdup(binfo->dev, + finfo->params, finfo->param_size, + GFP_KERNEL); + if (!feature->params) + return ERR_PTR(-ENOMEM); + feature->param_size = finfo->param_size; + } /* * the FIU header feature has some fundamental functions (sriov * set, port enable/disable) needed for the dfl bus device and @@ -810,17 +828,17 @@ static int build_info_commit_dev(struct build_feature_devs_info *binfo) devm_ioremap_resource(binfo->dev, &finfo->mmio_res); if (IS_ERR(feature->ioaddr)) - return PTR_ERR(feature->ioaddr); + return ERR_CAST(feature->ioaddr); } else { feature->resource_index = res_idx; - fdev->resource[res_idx++] = finfo->mmio_res; + fdata->resources[res_idx++] = finfo->mmio_res; } if (finfo->nr_irqs) { ctx = devm_kcalloc(binfo->dev, finfo->nr_irqs, sizeof(*ctx), GFP_KERNEL); if (!ctx) - return -ENOMEM; + return ERR_PTR(-ENOMEM); for (i = 0; i < finfo->nr_irqs; i++) ctx[i].irq = @@ -834,55 +852,94 @@ static int build_info_commit_dev(struct build_feature_devs_info *binfo) kfree(finfo); } - ret = platform_device_add(binfo->feature_dev); - if (!ret) { - if (type == PORT_ID) - dfl_fpga_cdev_add_port_dev(binfo->cdev, - binfo->feature_dev); - else - binfo->cdev->fme_dev = - get_device(&binfo->feature_dev->dev); - /* - * reset it to 
avoid build_info_free() freeing their resource. - * - * The resource of successfully registered feature devices - * will be freed by platform_device_unregister(). See the - * comments in build_info_create_dev(). - */ - binfo->feature_dev = NULL; - } + fdata->resource_num = res_idx; - return ret; + return fdata; } -static int -build_info_create_dev(struct build_feature_devs_info *binfo, - enum dfl_id_type type) +/* + * register current feature device, it is called when we need to switch to + * another feature parsing or we have parsed all features on given device + * feature list. + */ +static int feature_dev_register(struct dfl_feature_dev_data *fdata) { + struct dfl_feature_platform_data pdata = {}; struct platform_device *fdev; + struct dfl_feature *feature; + int ret; - if (type >= DFL_ID_MAX) - return -EINVAL; - - /* - * we use -ENODEV as the initialization indicator which indicates - * whether the id need to be reclaimed - */ - fdev = platform_device_alloc(dfl_devs[type].name, -ENODEV); + fdev = platform_device_alloc(fdata->pdev_name, fdata->pdev_id); if (!fdev) return -ENOMEM; - binfo->feature_dev = fdev; - binfo->feature_num = 0; + fdata->dev = fdev; - INIT_LIST_HEAD(&binfo->sub_features); + fdev->dev.parent = &fdata->dfl_cdev->region->dev; + fdev->dev.devt = dfl_get_devt(dfl_devs[fdata->type].devt_type, fdev->id); + + dfl_fpga_dev_for_each_feature(fdata, feature) + feature->dev = fdev; + + ret = platform_device_add_resources(fdev, fdata->resources, + fdata->resource_num); + if (ret) + goto err_put_dev; + + pdata.fdata = fdata; + ret = platform_device_add_data(fdev, &pdata, sizeof(pdata)); + if (ret) + goto err_put_dev; + + ret = platform_device_add(fdev); + if (ret) + goto err_put_dev; + + return 0; + +err_put_dev: + platform_device_put(fdev); + + fdata->dev = NULL; + + dfl_fpga_dev_for_each_feature(fdata, feature) + feature->dev = NULL; + + return ret; +} + +static void feature_dev_unregister(struct dfl_feature_dev_data *fdata) +{ + struct dfl_feature *feature; + + platform_device_unregister(fdata->dev); + + fdata->dev = NULL; + + dfl_fpga_dev_for_each_feature(fdata, feature) + feature->dev = NULL; +} + +static int build_info_commit_dev(struct build_feature_devs_info *binfo) +{ + struct dfl_feature_dev_data *fdata; + int ret; + + fdata = binfo_create_feature_dev_data(binfo); + if (IS_ERR(fdata)) + return PTR_ERR(fdata); - fdev->id = dfl_id_alloc(type, &fdev->dev); - if (fdev->id < 0) - return fdev->id; + ret = feature_dev_register(fdata); + if (ret) + return ret; - fdev->dev.parent = &binfo->cdev->region->dev; - fdev->dev.devt = dfl_get_devt(dfl_devs[type].devt_type, fdev->id); + if (binfo->type == PORT_ID) + dfl_fpga_cdev_add_port_data(binfo->cdev, fdata); + else + binfo->cdev->fme_dev = get_device(&fdata->dev->dev); + + /* reset the binfo for next FIU */ + binfo->type = DFL_ID_MAX; return 0; } @@ -891,38 +948,25 @@ static void build_info_free(struct build_feature_devs_info *binfo) { struct dfl_feature_info *finfo, *p; - /* - * it is a valid id, free it. 
See comments in - * build_info_create_dev() - */ - if (binfo->feature_dev && binfo->feature_dev->id >= 0) { - dfl_id_free(feature_dev_id_type(binfo->feature_dev), - binfo->feature_dev->id); - - list_for_each_entry_safe(finfo, p, &binfo->sub_features, node) { - list_del(&finfo->node); - kfree(finfo); - } + list_for_each_entry_safe(finfo, p, &binfo->sub_features, node) { + list_del(&finfo->node); + kfree(finfo); } - platform_device_put(binfo->feature_dev); - devm_kfree(binfo->dev, binfo); } -static inline u32 feature_size(void __iomem *start) +static inline u32 feature_size(u64 value) { - u64 v = readq(start + DFH); - u32 ofst = FIELD_GET(DFH_NEXT_HDR_OFST, v); + u32 ofst = FIELD_GET(DFH_NEXT_HDR_OFST, value); /* workaround for private features with invalid size, use 4K instead */ return ofst ? ofst : 4096; } -static u16 feature_id(void __iomem *start) +static u16 feature_id(u64 value) { - u64 v = readq(start + DFH); - u16 id = FIELD_GET(DFH_ID, v); - u8 type = FIELD_GET(DFH_TYPE, v); + u16 id = FIELD_GET(DFH_ID, value); + u8 type = FIELD_GET(DFH_TYPE, value); if (type == DFH_TYPE_FIU) return FEATURE_ID_FIU_HEADER; @@ -935,50 +979,115 @@ static u16 feature_id(void __iomem *start) return 0; } +static u64 *find_param(u64 *params, resource_size_t max, int param_id) +{ + u64 *end = params + max / sizeof(u64); + u64 v, next; + + while (params < end) { + v = *params; + if (param_id == FIELD_GET(DFHv1_PARAM_HDR_ID, v)) + return params; + + if (FIELD_GET(DFHv1_PARAM_HDR_NEXT_EOP, v)) + break; + + next = FIELD_GET(DFHv1_PARAM_HDR_NEXT_OFFSET, v); + params += next; + } + + return NULL; +} + +/** + * dfh_find_param() - find parameter block for the given parameter id + * @dfl_dev: dfl device + * @param_id: id of dfl parameter + * @psize: destination to store size of parameter data in bytes + * + * Return: pointer to start of parameter data, PTR_ERR otherwise. + */ +void *dfh_find_param(struct dfl_device *dfl_dev, int param_id, size_t *psize) +{ + u64 *phdr = find_param(dfl_dev->params, dfl_dev->param_size, param_id); + + if (!phdr) + return ERR_PTR(-ENOENT); + + if (psize) + *psize = (FIELD_GET(DFHv1_PARAM_HDR_NEXT_OFFSET, *phdr) - 1) * sizeof(u64); + + return phdr + 1; +} +EXPORT_SYMBOL_GPL(dfh_find_param); + static int parse_feature_irqs(struct build_feature_devs_info *binfo, - resource_size_t ofst, u16 fid, - unsigned int *irq_base, unsigned int *nr_irqs) + resource_size_t ofst, struct dfl_feature_info *finfo) { void __iomem *base = binfo->ioaddr + ofst; unsigned int i, ibase, inr = 0; + void *params = finfo->params; + enum dfl_id_type type; + u16 fid = finfo->fid; int virq; + u64 *p; u64 v; - /* - * Ideally DFL framework should only read info from DFL header, but - * current version DFL only provides mmio resources information for - * each feature in DFL Header, no field for interrupt resources. - * Interrupt resource information is provided by specific mmio - * registers of each private feature which supports interrupt. So in - * order to parse and assign irq resources, DFL framework has to look - * into specific capability registers of these private features. - * - * Once future DFL version supports generic interrupt resource - * information in common DFL headers, the generic interrupt parsing - * code will be added. But in order to be compatible to old version - * DFL, the driver may still fall back to these quirks. 
- */ - switch (fid) { - case PORT_FEATURE_ID_UINT: - v = readq(base + PORT_UINT_CAP); - ibase = FIELD_GET(PORT_UINT_CAP_FST_VECT, v); - inr = FIELD_GET(PORT_UINT_CAP_INT_NUM, v); + switch (finfo->dfh_version) { + case 0: + /* + * DFHv0 only provides MMIO resource information for each feature + * in the DFL header. There is no generic interrupt information. + * Instead, features with interrupt functionality provide + * the information in feature specific registers. + */ + type = binfo->type; + if (type == PORT_ID) { + switch (fid) { + case PORT_FEATURE_ID_UINT: + v = readq(base + PORT_UINT_CAP); + ibase = FIELD_GET(PORT_UINT_CAP_FST_VECT, v); + inr = FIELD_GET(PORT_UINT_CAP_INT_NUM, v); + break; + case PORT_FEATURE_ID_ERROR: + v = readq(base + PORT_ERROR_CAP); + ibase = FIELD_GET(PORT_ERROR_CAP_INT_VECT, v); + inr = FIELD_GET(PORT_ERROR_CAP_SUPP_INT, v); + break; + } + } else if (type == FME_ID) { + switch (fid) { + case FME_FEATURE_ID_GLOBAL_ERR: + v = readq(base + FME_ERROR_CAP); + ibase = FIELD_GET(FME_ERROR_CAP_INT_VECT, v); + inr = FIELD_GET(FME_ERROR_CAP_SUPP_INT, v); + break; + } + } break; - case PORT_FEATURE_ID_ERROR: - v = readq(base + PORT_ERROR_CAP); - ibase = FIELD_GET(PORT_ERROR_CAP_INT_VECT, v); - inr = FIELD_GET(PORT_ERROR_CAP_SUPP_INT, v); + + case 1: + /* + * DFHv1 provides interrupt resource information in DFHv1 + * parameter blocks. + */ + p = find_param(params, finfo->param_size, DFHv1_PARAM_ID_MSI_X); + if (!p) + break; + + p++; + ibase = FIELD_GET(DFHv1_PARAM_MSI_X_STARTV, *p); + inr = FIELD_GET(DFHv1_PARAM_MSI_X_NUMV, *p); break; - case FME_FEATURE_ID_GLOBAL_ERR: - v = readq(base + FME_ERROR_CAP); - ibase = FIELD_GET(FME_ERROR_CAP_INT_VECT, v); - inr = FIELD_GET(FME_ERROR_CAP_SUPP_INT, v); + + default: + dev_warn(binfo->dev, "unexpected DFH version %d\n", finfo->dfh_version); break; } if (!inr) { - *irq_base = 0; - *nr_irqs = 0; + finfo->irq_base = 0; + finfo->nr_irqs = 0; return 0; } @@ -1001,12 +1110,37 @@ static int parse_feature_irqs(struct build_feature_devs_info *binfo, } } - *irq_base = ibase; - *nr_irqs = inr; + finfo->irq_base = ibase; + finfo->nr_irqs = inr; return 0; } +static int dfh_get_param_size(void __iomem *dfh_base, resource_size_t max) +{ + int size = 0; + u64 v, next; + + if (!FIELD_GET(DFHv1_CSR_SIZE_GRP_HAS_PARAMS, + readq(dfh_base + DFHv1_CSR_SIZE_GRP))) + return 0; + + while (size + DFHv1_PARAM_HDR < max) { + v = readq(dfh_base + DFHv1_PARAM_HDR + size); + + next = FIELD_GET(DFHv1_PARAM_HDR_NEXT_OFFSET, v); + if (!next) + return -EINVAL; + + size += next * sizeof(u64); + + if (FIELD_GET(DFHv1_PARAM_HDR_NEXT_EOP, v)) + return size; + } + + return -ENOENT; +} + /* * when create sub feature instances, for private features, it doesn't need * to provide resource size and feature id as they could be read from DFH @@ -1018,31 +1152,69 @@ static int create_feature_instance(struct build_feature_devs_info *binfo, resource_size_t ofst, resource_size_t size, u16 fid) { - unsigned int irq_base, nr_irqs; struct dfl_feature_info *finfo; + resource_size_t start, end; + int dfh_psize = 0; + u8 revision = 0; + u64 v, addr_off; + u8 dfh_ver = 0; int ret; - /* read feature size and id if inputs are invalid */ - size = size ? size : feature_size(binfo->ioaddr + ofst); - fid = fid ? fid : feature_id(binfo->ioaddr + ofst); + if (fid != FEATURE_ID_AFU) { + v = readq(binfo->ioaddr + ofst); + revision = FIELD_GET(DFH_REVISION, v); + dfh_ver = FIELD_GET(DFH_VERSION, v); + /* read feature size and id if inputs are invalid */ + size = size ? 
size : feature_size(v); + fid = fid ? fid : feature_id(v); + if (dfh_ver == 1) { + dfh_psize = dfh_get_param_size(binfo->ioaddr + ofst, size); + if (dfh_psize < 0) { + dev_err(binfo->dev, + "failed to read size of DFHv1 parameters %d\n", + dfh_psize); + return dfh_psize; + } + dev_dbg(binfo->dev, "dfhv1_psize %d\n", dfh_psize); + } + } if (binfo->len - ofst < size) return -EINVAL; - ret = parse_feature_irqs(binfo, ofst, fid, &irq_base, &nr_irqs); - if (ret) - return ret; - - finfo = kzalloc(sizeof(*finfo), GFP_KERNEL); + finfo = kzalloc(struct_size(finfo, params, dfh_psize / sizeof(u64)), GFP_KERNEL); if (!finfo) return -ENOMEM; + memcpy_fromio(finfo->params, binfo->ioaddr + ofst + DFHv1_PARAM_HDR, dfh_psize); + finfo->param_size = dfh_psize; + finfo->fid = fid; - finfo->mmio_res.start = binfo->start + ofst; - finfo->mmio_res.end = finfo->mmio_res.start + size - 1; + finfo->revision = revision; + finfo->dfh_version = dfh_ver; + if (dfh_ver == 1) { + v = readq(binfo->ioaddr + ofst + DFHv1_CSR_ADDR); + addr_off = FIELD_GET(DFHv1_CSR_ADDR_MASK, v); + if (FIELD_GET(DFHv1_CSR_ADDR_REL, v)) + start = addr_off << 1; + else + start = binfo->start + ofst + addr_off; + + v = readq(binfo->ioaddr + ofst + DFHv1_CSR_SIZE_GRP); + end = start + FIELD_GET(DFHv1_CSR_SIZE_GRP_SIZE, v) - 1; + } else { + start = binfo->start + ofst; + end = start + size - 1; + } finfo->mmio_res.flags = IORESOURCE_MEM; - finfo->irq_base = irq_base; - finfo->nr_irqs = nr_irqs; + finfo->mmio_res.start = start; + finfo->mmio_res.end = end; + + ret = parse_feature_irqs(binfo, ofst, finfo); + if (ret) { + kfree(finfo); + return ret; + } list_add_tail(&finfo->node, &binfo->sub_features); binfo->feature_num++; @@ -1061,7 +1233,7 @@ static int parse_feature_port_afu(struct build_feature_devs_info *binfo, return create_feature_instance(binfo, ofst, size, FEATURE_ID_AFU); } -#define is_feature_dev_detected(binfo) (!!(binfo)->feature_dev) +#define is_feature_dev_detected(binfo) ((binfo)->type != DFL_ID_MAX) static int parse_feature_afu(struct build_feature_devs_info *binfo, resource_size_t ofst) @@ -1071,12 +1243,11 @@ static int parse_feature_afu(struct build_feature_devs_info *binfo, return -EINVAL; } - switch (feature_dev_id_type(binfo->feature_dev)) { + switch (binfo->type) { case PORT_ID: return parse_feature_port_afu(binfo, ofst); default: - dev_info(binfo->dev, "AFU belonging to FIU %s is not supported yet.\n", - binfo->feature_dev->name); + dev_info(binfo->dev, "AFU belonging to FIU is not supported yet.\n"); } return 0; @@ -1117,6 +1288,7 @@ static void build_info_complete(struct build_feature_devs_info *binfo) static int parse_feature_fiu(struct build_feature_devs_info *binfo, resource_size_t ofst) { + enum dfl_id_type type; int ret = 0; u32 offset; u16 id; @@ -1138,10 +1310,13 @@ static int parse_feature_fiu(struct build_feature_devs_info *binfo, v = readq(binfo->ioaddr + DFH); id = FIELD_GET(DFH_ID, v); - /* create platform device for dfl feature dev */ - ret = build_info_create_dev(binfo, dfh_id_to_type(id)); - if (ret) - return ret; + type = dfh_id_to_type(id); + if (type >= DFL_ID_MAX) + return -EINVAL; + + binfo->type = type; + binfo->feature_num = 0; + INIT_LIST_HEAD(&binfo->sub_features); ret = create_feature_instance(binfo, 0, 0, 0); if (ret) @@ -1166,7 +1341,7 @@ static int parse_feature_private(struct build_feature_devs_info *binfo, { if (!is_feature_dev_detected(binfo)) { dev_err(binfo->dev, "the private feature 0x%x does not belong to any AFU.\n", - feature_id(binfo->ioaddr + ofst)); + 
feature_id(readq(binfo->ioaddr + ofst))); return -EINVAL; } @@ -1359,13 +1534,9 @@ EXPORT_SYMBOL_GPL(dfl_fpga_enum_info_add_irq); static int remove_feature_dev(struct device *dev, void *data) { - struct platform_device *pdev = to_platform_device(dev); - enum dfl_id_type type = feature_dev_id_type(pdev); - int id = pdev->id; - - platform_device_unregister(pdev); + struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev); - dfl_id_free(type, id); + feature_dev_unregister(fdata); return 0; } @@ -1400,19 +1571,15 @@ dfl_fpga_feature_devs_enumerate(struct dfl_fpga_enum_info *info) if (!cdev) return ERR_PTR(-ENOMEM); - cdev->region = devm_fpga_region_create(info->dev, NULL, NULL); - if (!cdev->region) { - ret = -ENOMEM; - goto free_cdev_exit; - } - cdev->parent = info->dev; mutex_init(&cdev->lock); INIT_LIST_HEAD(&cdev->port_dev_list); - ret = fpga_region_register(cdev->region); - if (ret) + cdev->region = fpga_region_register(info->dev, NULL, NULL); + if (IS_ERR(cdev->region)) { + ret = PTR_ERR(cdev->region); goto free_cdev_exit; + } /* create and init build info for enumeration */ binfo = devm_kzalloc(info->dev, sizeof(*binfo), GFP_KERNEL); @@ -1421,6 +1588,7 @@ dfl_fpga_feature_devs_enumerate(struct dfl_fpga_enum_info *info) goto unregister_region_exit; } + binfo->type = DFL_ID_MAX; binfo->dev = info->dev; binfo->cdev = cdev; @@ -1462,25 +1630,10 @@ EXPORT_SYMBOL_GPL(dfl_fpga_feature_devs_enumerate); */ void dfl_fpga_feature_devs_remove(struct dfl_fpga_cdev *cdev) { - struct dfl_feature_platform_data *pdata, *ptmp; - mutex_lock(&cdev->lock); if (cdev->fme_dev) put_device(cdev->fme_dev); - list_for_each_entry_safe(pdata, ptmp, &cdev->port_dev_list, node) { - struct platform_device *port_dev = pdata->dev; - - /* remove released ports */ - if (!device_is_registered(&port_dev->dev)) { - dfl_id_free(feature_dev_id_type(port_dev), - port_dev->id); - platform_device_put(port_dev); - } - - list_del(&pdata->node); - put_device(&port_dev->dev); - } mutex_unlock(&cdev->lock); remove_feature_devs(cdev); @@ -1491,7 +1644,7 @@ void dfl_fpga_feature_devs_remove(struct dfl_fpga_cdev *cdev) EXPORT_SYMBOL_GPL(dfl_fpga_feature_devs_remove); /** - * __dfl_fpga_cdev_find_port - find a port under given container device + * __dfl_fpga_cdev_find_port_data - find a port under given container device * * @cdev: container device * @data: data passed to match function @@ -1504,23 +1657,20 @@ EXPORT_SYMBOL_GPL(dfl_fpga_feature_devs_remove); * * NOTE: you will need to drop the device reference with put_device() after use. 
*/ -struct platform_device * -__dfl_fpga_cdev_find_port(struct dfl_fpga_cdev *cdev, void *data, - int (*match)(struct platform_device *, void *)) +struct dfl_feature_dev_data * +__dfl_fpga_cdev_find_port_data(struct dfl_fpga_cdev *cdev, void *data, + int (*match)(struct dfl_feature_dev_data *, void *)) { - struct dfl_feature_platform_data *pdata; - struct platform_device *port_dev; - - list_for_each_entry(pdata, &cdev->port_dev_list, node) { - port_dev = pdata->dev; + struct dfl_feature_dev_data *fdata; - if (match(port_dev, data) && get_device(&port_dev->dev)) - return port_dev; + list_for_each_entry(fdata, &cdev->port_dev_list, node) { + if (match(fdata, data)) + return fdata; } return NULL; } -EXPORT_SYMBOL_GPL(__dfl_fpga_cdev_find_port); +EXPORT_SYMBOL_GPL(__dfl_fpga_cdev_find_port_data); static int __init dfl_fpga_init(void) { @@ -1554,33 +1704,28 @@ static int __init dfl_fpga_init(void) */ int dfl_fpga_cdev_release_port(struct dfl_fpga_cdev *cdev, int port_id) { - struct dfl_feature_platform_data *pdata; - struct platform_device *port_pdev; + struct dfl_feature_dev_data *fdata; int ret = -ENODEV; mutex_lock(&cdev->lock); - port_pdev = __dfl_fpga_cdev_find_port(cdev, &port_id, - dfl_fpga_check_port_id); - if (!port_pdev) + fdata = __dfl_fpga_cdev_find_port_data(cdev, &port_id, + dfl_fpga_check_port_id); + if (!fdata) goto unlock_exit; - if (!device_is_registered(&port_pdev->dev)) { + if (!fdata->dev) { ret = -EBUSY; - goto put_dev_exit; + goto unlock_exit; } - pdata = dev_get_platdata(&port_pdev->dev); - - mutex_lock(&pdata->lock); - ret = dfl_feature_dev_use_begin(pdata, true); - mutex_unlock(&pdata->lock); + mutex_lock(&fdata->lock); + ret = dfl_feature_dev_use_begin(fdata, true); + mutex_unlock(&fdata->lock); if (ret) - goto put_dev_exit; + goto unlock_exit; - platform_device_del(port_pdev); + feature_dev_unregister(fdata); cdev->released_port_num++; -put_dev_exit: - put_device(&port_pdev->dev); unlock_exit: mutex_unlock(&cdev->lock); return ret; @@ -1600,34 +1745,29 @@ EXPORT_SYMBOL_GPL(dfl_fpga_cdev_release_port); */ int dfl_fpga_cdev_assign_port(struct dfl_fpga_cdev *cdev, int port_id) { - struct dfl_feature_platform_data *pdata; - struct platform_device *port_pdev; + struct dfl_feature_dev_data *fdata; int ret = -ENODEV; mutex_lock(&cdev->lock); - port_pdev = __dfl_fpga_cdev_find_port(cdev, &port_id, - dfl_fpga_check_port_id); - if (!port_pdev) + fdata = __dfl_fpga_cdev_find_port_data(cdev, &port_id, + dfl_fpga_check_port_id); + if (!fdata) goto unlock_exit; - if (device_is_registered(&port_pdev->dev)) { + if (fdata->dev) { ret = -EBUSY; - goto put_dev_exit; + goto unlock_exit; } - ret = platform_device_add(port_pdev); + ret = feature_dev_register(fdata); if (ret) - goto put_dev_exit; - - pdata = dev_get_platdata(&port_pdev->dev); + goto unlock_exit; - mutex_lock(&pdata->lock); - dfl_feature_dev_use_end(pdata); - mutex_unlock(&pdata->lock); + mutex_lock(&fdata->lock); + dfl_feature_dev_use_end(fdata); + mutex_unlock(&fdata->lock); cdev->released_port_num--; -put_dev_exit: - put_device(&port_pdev->dev); unlock_exit: mutex_unlock(&cdev->lock); return ret; @@ -1637,10 +1777,11 @@ EXPORT_SYMBOL_GPL(dfl_fpga_cdev_assign_port); static void config_port_access_mode(struct device *fme_dev, int port_id, bool is_vf) { + struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(fme_dev); void __iomem *base; u64 v; - base = dfl_get_feature_ioaddr_by_id(fme_dev, FME_FEATURE_ID_HEADER); + base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_HEADER); v = readq(base + 
FME_HDR_PORT_OFST(port_id)); @@ -1664,14 +1805,14 @@ static void config_port_access_mode(struct device *fme_dev, int port_id, */ void dfl_fpga_cdev_config_ports_pf(struct dfl_fpga_cdev *cdev) { - struct dfl_feature_platform_data *pdata; + struct dfl_feature_dev_data *fdata; mutex_lock(&cdev->lock); - list_for_each_entry(pdata, &cdev->port_dev_list, node) { - if (device_is_registered(&pdata->dev->dev)) + list_for_each_entry(fdata, &cdev->port_dev_list, node) { + if (fdata->dev) continue; - config_port_pf_mode(cdev->fme_dev, pdata->id); + config_port_pf_mode(cdev->fme_dev, fdata->id); } mutex_unlock(&cdev->lock); } @@ -1690,7 +1831,7 @@ EXPORT_SYMBOL_GPL(dfl_fpga_cdev_config_ports_pf); */ int dfl_fpga_cdev_config_ports_vf(struct dfl_fpga_cdev *cdev, int num_vfs) { - struct dfl_feature_platform_data *pdata; + struct dfl_feature_dev_data *fdata; int ret = 0; mutex_lock(&cdev->lock); @@ -1704,11 +1845,11 @@ int dfl_fpga_cdev_config_ports_vf(struct dfl_fpga_cdev *cdev, int num_vfs) goto done; } - list_for_each_entry(pdata, &cdev->port_dev_list, node) { - if (device_is_registered(&pdata->dev->dev)) + list_for_each_entry(fdata, &cdev->port_dev_list, node) { + if (fdata->dev) continue; - config_port_vf_mode(cdev->fme_dev, pdata->id); + config_port_vf_mode(cdev->fme_dev, fdata->id); } done: mutex_unlock(&cdev->lock); @@ -1720,7 +1861,7 @@ static irqreturn_t dfl_irq_handler(int irq, void *arg) { struct eventfd_ctx *trigger = arg; - eventfd_signal(trigger, 1); + eventfd_signal(trigger); return IRQ_HANDLED; } @@ -1841,7 +1982,7 @@ long dfl_feature_ioctl_set_irq(struct platform_device *pdev, struct dfl_feature *feature, unsigned long arg) { - struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev); + struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev); struct dfl_fpga_irq_set hdr; s32 *fds; long ret; @@ -1856,14 +1997,14 @@ long dfl_feature_ioctl_set_irq(struct platform_device *pdev, (hdr.start + hdr.count < hdr.start)) return -EINVAL; - fds = memdup_user((void __user *)(arg + sizeof(hdr)), - hdr.count * sizeof(s32)); + fds = memdup_array_user((void __user *)(arg + sizeof(hdr)), + hdr.count, sizeof(s32)); if (IS_ERR(fds)) return PTR_ERR(fds); - mutex_lock(&pdata->lock); + mutex_lock(&fdata->lock); ret = dfl_fpga_set_irq_triggers(feature, hdr.start, hdr.count, fds); - mutex_unlock(&pdata->lock); + mutex_unlock(&fdata->lock); kfree(fds); return ret; diff --git a/drivers/fpga/dfl.h b/drivers/fpga/dfl.h index 2b82c96ba56c..95539f1213cb 100644 --- a/drivers/fpga/dfl.h +++ b/drivers/fpga/dfl.h @@ -17,6 +17,7 @@ #include <linux/bitfield.h> #include <linux/cdev.h> #include <linux/delay.h> +#include <linux/dfl.h> #include <linux/eventfd.h> #include <linux/fs.h> #include <linux/interrupt.h> @@ -74,11 +75,47 @@ #define DFH_REVISION GENMASK_ULL(15, 12) /* Feature revision */ #define DFH_NEXT_HDR_OFST GENMASK_ULL(39, 16) /* Offset to next DFH */ #define DFH_EOL BIT_ULL(40) /* End of list */ +#define DFH_VERSION GENMASK_ULL(59, 52) /* DFH version */ #define DFH_TYPE GENMASK_ULL(63, 60) /* Feature type */ #define DFH_TYPE_AFU 1 #define DFH_TYPE_PRIVATE 3 #define DFH_TYPE_FIU 4 +/* + * DFHv1 Register Offset definitons + * In DHFv1, DFH + GUID + CSR_START + CSR_SIZE_GROUP + PARAM_HDR + PARAM_DATA + * as common header registers + */ +#define DFHv1_CSR_ADDR 0x18 /* CSR Register start address */ +#define DFHv1_CSR_SIZE_GRP 0x20 /* Size of Reg Block and Group/tag */ +#define DFHv1_PARAM_HDR 0x28 /* Optional First Param header */ + +/* + * CSR Rel Bit, 1'b0 = relative (offset from feature 
DFH start), + * 1'b1 = absolute (ARM or other non-PCIe use) + */ +#define DFHv1_CSR_ADDR_REL BIT_ULL(0) + +/* CSR Header Register Bit Definitions */ +#define DFHv1_CSR_ADDR_MASK GENMASK_ULL(63, 1) /* 63:1 of CSR address */ + +/* CSR SIZE Goup Register Bit Definitions */ +#define DFHv1_CSR_SIZE_GRP_INSTANCE_ID GENMASK_ULL(15, 0) /* Enumeration instantiated IP */ +#define DFHv1_CSR_SIZE_GRP_GROUPING_ID GENMASK_ULL(30, 16) /* Group Features/interfaces */ +#define DFHv1_CSR_SIZE_GRP_HAS_PARAMS BIT_ULL(31) /* Presence of Parameters */ +#define DFHv1_CSR_SIZE_GRP_SIZE GENMASK_ULL(63, 32) /* Size of CSR Block in bytes */ + +/* PARAM Header Register Bit Definitions */ +#define DFHv1_PARAM_HDR_ID GENMASK_ULL(15, 0) /* Id of this Param */ +#define DFHv1_PARAM_HDR_VER GENMASK_ULL(31, 16) /* Version Param */ +#define DFHv1_PARAM_HDR_NEXT_OFFSET GENMASK_ULL(63, 35) /* Offset of next Param */ +#define DFHv1_PARAM_HDR_NEXT_EOP BIT_ULL(32) +#define DFHv1_PARAM_DATA 0x08 /* Offset of Param data from Param header */ + +#define DFHv1_PARAM_ID_MSI_X 0x1 +#define DFHv1_PARAM_MSI_X_NUMV GENMASK_ULL(63, 32) +#define DFHv1_PARAM_MSI_X_STARTV GENMASK_ULL(31, 0) + /* Next AFU Register Bitfield */ #define NEXT_AFU_NEXT_DFH_OFST GENMASK_ULL(23, 0) /* Offset to next AFU */ @@ -89,6 +126,7 @@ #define FME_HDR_NEXT_AFU NEXT_AFU #define FME_HDR_CAP 0x30 #define FME_HDR_PORT_OFST(n) (0x38 + ((n) * 0x8)) +#define FME_PORT_OFST_BAR_SKIP 7 #define FME_HDR_BITSTREAM_ID 0x60 #define FME_HDR_BITSTREAM_MD 0x68 @@ -169,6 +207,8 @@ #define PORT_UINT_CAP_INT_NUM GENMASK_ULL(11, 0) /* Interrupts num */ #define PORT_UINT_CAP_FST_VECT GENMASK_ULL(23, 12) /* First Vector */ +struct dfl_feature_dev_data; + /** * struct dfl_fpga_port_ops - port ops * @@ -182,15 +222,15 @@ struct dfl_fpga_port_ops { const char *name; struct module *owner; struct list_head node; - int (*get_id)(struct platform_device *pdev); - int (*enable_set)(struct platform_device *pdev, bool enable); + int (*get_id)(struct dfl_feature_dev_data *fdata); + int (*enable_set)(struct dfl_feature_dev_data *fdata, bool enable); }; void dfl_fpga_port_ops_add(struct dfl_fpga_port_ops *ops); void dfl_fpga_port_ops_del(struct dfl_fpga_port_ops *ops); -struct dfl_fpga_port_ops *dfl_fpga_port_ops_get(struct platform_device *pdev); +struct dfl_fpga_port_ops *dfl_fpga_port_ops_get(struct dfl_feature_dev_data *fdata); void dfl_fpga_port_ops_put(struct dfl_fpga_port_ops *ops); -int dfl_fpga_check_port_id(struct platform_device *pdev, void *pport_id); +int dfl_fpga_check_port_id(struct dfl_feature_dev_data *fdata, void *pport_id); /** * struct dfl_feature_id - dfl private feature id @@ -230,19 +270,24 @@ struct dfl_feature_irq_ctx { * * @dev: ptr to pdev of the feature device which has the sub feature. * @id: sub feature id. + * @revision: revision of this sub feature. * @resource_index: each sub feature has one mmio resource for its registers. * this index is used to find its mmio resource from the - * feature dev (platform device)'s reources. + * feature dev (platform device)'s resources. * @ioaddr: mapped mmio resource address. * @irq_ctx: interrupt context list. * @nr_irqs: number of interrupt contexts. * @ops: ops of this sub feature. * @ddev: ptr to the dfl device of this sub feature. * @priv: priv data of this feature. 
+ * @dfh_version: version of the DFH + * @param_size: size of dfh parameters + * @params: point to memory copy of dfh parameters */ struct dfl_feature { struct platform_device *dev; u16 id; + u8 revision; int resource_index; void __iomem *ioaddr; struct dfl_feature_irq_ctx *irq_ctx; @@ -250,31 +295,40 @@ struct dfl_feature { const struct dfl_feature_ops *ops; struct dfl_device *ddev; void *priv; + u8 dfh_version; + unsigned int param_size; + void *params; }; #define FEATURE_DEV_ID_UNUSED (-1) /** - * struct dfl_feature_platform_data - platform data for feature devices + * struct dfl_feature_dev_data - dfl enumeration data for dfl feature dev. * - * @node: node to link feature devs to container device's port_dev_list. - * @lock: mutex to protect platform data. - * @cdev: cdev of feature dev. - * @dev: ptr to platform device linked with this platform data. + * @node: node to link the data structure to container device's port_dev_list. + * @lock: mutex to protect feature dev data. + * @dev: ptr to the feature's platform device linked with this structure. + * @type: type of DFL FIU for the feature dev. See enum dfl_id_type. + * @pdev_id: platform device id for the feature dev. + * @pdev_name: platform device name for the feature dev. * @dfl_cdev: ptr to container device. - * @id: id used for this feature device. + * @id: id used for the feature device. * @disable_count: count for port disable. * @excl_open: set on feature device exclusive open. * @open_count: count for feature device open. * @num: number for sub features. * @private: ptr to feature dev private data. - * @features: sub features of this feature dev. + * @features: sub features for the feature dev. + * @resource_num: number of resources for the feature dev. + * @resources: resources for the feature dev. */ -struct dfl_feature_platform_data { +struct dfl_feature_dev_data { struct list_head node; struct mutex lock; - struct cdev cdev; struct platform_device *dev; + enum dfl_id_type type; + int pdev_id; + const char *pdev_name; struct dfl_fpga_cdev *dfl_cdev; int id; unsigned int disable_count; @@ -282,55 +336,68 @@ struct dfl_feature_platform_data { int open_count; void *private; int num; - struct dfl_feature features[]; + struct dfl_feature *features; + int resource_num; + struct resource *resources; +}; + +/** + * struct dfl_feature_platform_data - platform data for feature devices + * + * @cdev: cdev of feature dev. + * @fdata: dfl enumeration data for the dfl feature device. 
+ */ +struct dfl_feature_platform_data { + struct cdev cdev; + struct dfl_feature_dev_data *fdata; }; static inline -int dfl_feature_dev_use_begin(struct dfl_feature_platform_data *pdata, +int dfl_feature_dev_use_begin(struct dfl_feature_dev_data *fdata, bool excl) { - if (pdata->excl_open) + if (fdata->excl_open) return -EBUSY; if (excl) { - if (pdata->open_count) + if (fdata->open_count) return -EBUSY; - pdata->excl_open = true; + fdata->excl_open = true; } - pdata->open_count++; + fdata->open_count++; return 0; } static inline -void dfl_feature_dev_use_end(struct dfl_feature_platform_data *pdata) +void dfl_feature_dev_use_end(struct dfl_feature_dev_data *fdata) { - pdata->excl_open = false; + fdata->excl_open = false; - if (WARN_ON(pdata->open_count <= 0)) + if (WARN_ON(fdata->open_count <= 0)) return; - pdata->open_count--; + fdata->open_count--; } static inline -int dfl_feature_dev_use_count(struct dfl_feature_platform_data *pdata) +int dfl_feature_dev_use_count(struct dfl_feature_dev_data *fdata) { - return pdata->open_count; + return fdata->open_count; } static inline -void dfl_fpga_pdata_set_private(struct dfl_feature_platform_data *pdata, +void dfl_fpga_fdata_set_private(struct dfl_feature_dev_data *fdata, void *private) { - pdata->private = private; + fdata->private = private; } static inline -void *dfl_fpga_pdata_get_private(struct dfl_feature_platform_data *pdata) +void *dfl_fpga_fdata_get_private(struct dfl_feature_dev_data *fdata) { - return pdata->private; + return fdata->private; } struct dfl_feature_ops { @@ -353,37 +420,36 @@ int dfl_fpga_dev_ops_register(struct platform_device *pdev, struct module *owner); void dfl_fpga_dev_ops_unregister(struct platform_device *pdev); -static inline -struct platform_device *dfl_fpga_inode_to_feature_dev(struct inode *inode) +static inline struct dfl_feature_dev_data * +dfl_fpga_inode_to_feature_dev_data(struct inode *inode) { struct dfl_feature_platform_data *pdata; pdata = container_of(inode->i_cdev, struct dfl_feature_platform_data, cdev); - return pdata->dev; + return pdata->fdata; } -#define dfl_fpga_dev_for_each_feature(pdata, feature) \ - for ((feature) = (pdata)->features; \ - (feature) < (pdata)->features + (pdata)->num; (feature)++) +#define dfl_fpga_dev_for_each_feature(fdata, feature) \ + for ((feature) = (fdata)->features; \ + (feature) < (fdata)->features + (fdata)->num; (feature)++) -static inline -struct dfl_feature *dfl_get_feature_by_id(struct device *dev, u16 id) +static inline struct dfl_feature * +dfl_get_feature_by_id(struct dfl_feature_dev_data *fdata, u16 id) { - struct dfl_feature_platform_data *pdata = dev_get_platdata(dev); struct dfl_feature *feature; - dfl_fpga_dev_for_each_feature(pdata, feature) + dfl_fpga_dev_for_each_feature(fdata, feature) if (feature->id == id) return feature; return NULL; } -static inline -void __iomem *dfl_get_feature_ioaddr_by_id(struct device *dev, u16 id) +static inline void __iomem * +dfl_get_feature_ioaddr_by_id(struct dfl_feature_dev_data *fdata, u16 id) { - struct dfl_feature *feature = dfl_get_feature_by_id(dev, id); + struct dfl_feature *feature = dfl_get_feature_by_id(fdata, id); if (feature && feature->ioaddr) return feature->ioaddr; @@ -392,15 +458,18 @@ void __iomem *dfl_get_feature_ioaddr_by_id(struct device *dev, u16 id) return NULL; } -static inline bool is_dfl_feature_present(struct device *dev, u16 id) +static inline struct dfl_feature_dev_data * +to_dfl_feature_dev_data(struct device *dev) { - return !!dfl_get_feature_ioaddr_by_id(dev, id); + struct 
dfl_feature_platform_data *pdata = dev_get_platdata(dev); + + return pdata->fdata; } static inline -struct device *dfl_fpga_pdata_to_parent(struct dfl_feature_platform_data *pdata) +struct device *dfl_fpga_fdata_to_parent(struct dfl_feature_dev_data *fdata) { - return pdata->dev->dev.parent->parent; + return fdata->dev->dev.parent->parent; } static inline bool dfl_feature_is_fme(void __iomem *base) @@ -482,26 +551,21 @@ struct dfl_fpga_cdev * dfl_fpga_feature_devs_enumerate(struct dfl_fpga_enum_info *info); void dfl_fpga_feature_devs_remove(struct dfl_fpga_cdev *cdev); -/* - * need to drop the device reference with put_device() after use port platform - * device returned by __dfl_fpga_cdev_find_port and dfl_fpga_cdev_find_port - * functions. - */ -struct platform_device * -__dfl_fpga_cdev_find_port(struct dfl_fpga_cdev *cdev, void *data, - int (*match)(struct platform_device *, void *)); +struct dfl_feature_dev_data * +__dfl_fpga_cdev_find_port_data(struct dfl_fpga_cdev *cdev, void *data, + int (*match)(struct dfl_feature_dev_data *, void *)); -static inline struct platform_device * -dfl_fpga_cdev_find_port(struct dfl_fpga_cdev *cdev, void *data, - int (*match)(struct platform_device *, void *)) +static inline struct dfl_feature_dev_data * +dfl_fpga_cdev_find_port_data(struct dfl_fpga_cdev *cdev, void *data, + int (*match)(struct dfl_feature_dev_data *, void *)) { - struct platform_device *pdev; + struct dfl_feature_dev_data *fdata; mutex_lock(&cdev->lock); - pdev = __dfl_fpga_cdev_find_port(cdev, data, match); + fdata = __dfl_fpga_cdev_find_port_data(cdev, data, match); mutex_unlock(&cdev->lock); - return pdev; + return fdata; } int dfl_fpga_cdev_release_port(struct dfl_fpga_cdev *cdev, int port_id); diff --git a/drivers/fpga/fpga-bridge.c b/drivers/fpga/fpga-bridge.c index 2bfb2ff86930..8ef395b49bf8 100644 --- a/drivers/fpga/fpga-bridge.c +++ b/drivers/fpga/fpga-bridge.c @@ -14,7 +14,7 @@ #include <linux/spinlock.h> static DEFINE_IDA(fpga_bridge_ida); -static struct class *fpga_bridge_class; +static const struct class fpga_bridge_class; /* Lock for adding/removing bridges to linked lists*/ static DEFINE_SPINLOCK(bridge_list_lock); @@ -30,7 +30,7 @@ int fpga_bridge_enable(struct fpga_bridge *bridge) { dev_dbg(&bridge->dev, "enable\n"); - if (bridge->br_ops && bridge->br_ops->enable_set) + if (bridge->br_ops->enable_set) return bridge->br_ops->enable_set(bridge, 1); return 0; @@ -48,62 +48,61 @@ int fpga_bridge_disable(struct fpga_bridge *bridge) { dev_dbg(&bridge->dev, "disable\n"); - if (bridge->br_ops && bridge->br_ops->enable_set) + if (bridge->br_ops->enable_set) return bridge->br_ops->enable_set(bridge, 0); return 0; } EXPORT_SYMBOL_GPL(fpga_bridge_disable); -static struct fpga_bridge *__fpga_bridge_get(struct device *dev, +static struct fpga_bridge *__fpga_bridge_get(struct device *bridge_dev, struct fpga_image_info *info) { struct fpga_bridge *bridge; - int ret = -ENODEV; - bridge = to_fpga_bridge(dev); + bridge = to_fpga_bridge(bridge_dev); bridge->info = info; - if (!mutex_trylock(&bridge->mutex)) { - ret = -EBUSY; - goto err_dev; - } + if (!mutex_trylock(&bridge->mutex)) + return ERR_PTR(-EBUSY); - if (!try_module_get(dev->parent->driver->owner)) - goto err_ll_mod; + if (!try_module_get(bridge->br_ops_owner)) { + mutex_unlock(&bridge->mutex); + return ERR_PTR(-ENODEV); + } dev_dbg(&bridge->dev, "get\n"); return bridge; - -err_ll_mod: - mutex_unlock(&bridge->mutex); -err_dev: - put_device(dev); - return ERR_PTR(ret); } /** * of_fpga_bridge_get - get an exclusive reference to an 
fpga bridge * - * @np: node pointer of an FPGA bridge - * @info: fpga image specific information + * @np: node pointer of an FPGA bridge. + * @info: fpga image specific information. * - * Return fpga_bridge struct if successful. - * Return -EBUSY if someone already has a reference to the bridge. - * Return -ENODEV if @np is not an FPGA Bridge. + * Return: + * * fpga_bridge struct pointer if successful. + * * -EBUSY if someone already has a reference to the bridge. + * * -ENODEV if @np is not an FPGA Bridge or can't take parent driver refcount. */ struct fpga_bridge *of_fpga_bridge_get(struct device_node *np, struct fpga_image_info *info) { - struct device *dev; + struct fpga_bridge *bridge; + struct device *bridge_dev; - dev = class_find_device_by_of_node(fpga_bridge_class, np); - if (!dev) + bridge_dev = class_find_device_by_of_node(&fpga_bridge_class, np); + if (!bridge_dev) return ERR_PTR(-ENODEV); - return __fpga_bridge_get(dev, info); + bridge = __fpga_bridge_get(bridge_dev, info); + if (IS_ERR(bridge)) + put_device(bridge_dev); + + return bridge; } EXPORT_SYMBOL_GPL(of_fpga_bridge_get); @@ -115,7 +114,7 @@ static int fpga_bridge_dev_match(struct device *dev, const void *data) /** * fpga_bridge_get - get an exclusive reference to an fpga bridge * @dev: parent device that fpga bridge was registered with - * @info: fpga manager info + * @info: fpga image specific information * * Given a device, get an exclusive reference to an fpga bridge. * @@ -124,14 +123,19 @@ static int fpga_bridge_dev_match(struct device *dev, const void *data) struct fpga_bridge *fpga_bridge_get(struct device *dev, struct fpga_image_info *info) { + struct fpga_bridge *bridge; struct device *bridge_dev; - bridge_dev = class_find_device(fpga_bridge_class, NULL, dev, + bridge_dev = class_find_device(&fpga_bridge_class, NULL, dev, fpga_bridge_dev_match); if (!bridge_dev) return ERR_PTR(-ENODEV); - return __fpga_bridge_get(bridge_dev, info); + bridge = __fpga_bridge_get(bridge_dev, info); + if (IS_ERR(bridge)) + put_device(bridge_dev); + + return bridge; } EXPORT_SYMBOL_GPL(fpga_bridge_get); @@ -145,7 +149,7 @@ void fpga_bridge_put(struct fpga_bridge *bridge) dev_dbg(&bridge->dev, "put\n"); bridge->info = NULL; - module_put(bridge->dev.parent->driver->owner); + module_put(bridge->br_ops_owner); mutex_unlock(&bridge->mutex); put_device(&bridge->dev); } @@ -155,9 +159,9 @@ EXPORT_SYMBOL_GPL(fpga_bridge_put); * fpga_bridges_enable - enable bridges in a list * @bridge_list: list of FPGA bridges * - * Enable each bridge in the list. If list is empty, do nothing. + * Enable each bridge in the list. If list is empty, do nothing. * - * Return 0 for success or empty bridge list; return error code otherwise. + * Return: 0 for success or empty bridge list or an error code otherwise. */ int fpga_bridges_enable(struct list_head *bridge_list) { @@ -179,9 +183,9 @@ EXPORT_SYMBOL_GPL(fpga_bridges_enable); * * @bridge_list: list of FPGA bridges * - * Disable each bridge in the list. If list is empty, do nothing. + * Disable each bridge in the list. If list is empty, do nothing. * - * Return 0 for success or empty bridge list; return error code otherwise. + * Return: 0 for success or empty bridge list or an error code otherwise. */ int fpga_bridges_disable(struct list_head *bridge_list) { @@ -228,9 +232,9 @@ EXPORT_SYMBOL_GPL(fpga_bridges_put); * @info: fpga image specific information * @bridge_list: list of FPGA bridges * - * Get an exclusive reference to the bridge and and it to the list. 
+ * Get an exclusive reference to the bridge and it to the list. * - * Return 0 for success, error code from of_fpga_bridge_get() othewise. + * Return: 0 for success, error code from of_fpga_bridge_get() otherwise. */ int of_fpga_bridge_get_to_list(struct device_node *np, struct fpga_image_info *info, @@ -258,9 +262,9 @@ EXPORT_SYMBOL_GPL(of_fpga_bridge_get_to_list); * @info: fpga image specific information * @bridge_list: list of FPGA bridges * - * Get an exclusive reference to the bridge and and it to the list. + * Get an exclusive reference to the bridge and it to the list. * - * Return 0 for success, error code from fpga_bridge_get() othewise. + * Return: 0 for success, error code from fpga_bridge_get() otherwise. */ int fpga_bridge_get_to_list(struct device *dev, struct fpga_image_info *info, @@ -293,12 +297,15 @@ static ssize_t state_show(struct device *dev, struct device_attribute *attr, char *buf) { struct fpga_bridge *bridge = to_fpga_bridge(dev); - int enable = 1; + int state = 1; - if (bridge->br_ops && bridge->br_ops->enable_show) - enable = bridge->br_ops->enable_show(bridge); + if (bridge->br_ops->enable_show) { + state = bridge->br_ops->enable_show(bridge); + if (state < 0) + return state; + } - return sprintf(buf, "%s\n", enable ? "enabled" : "disabled"); + return sysfs_emit(buf, "%s\n", state ? "enabled" : "disabled"); } static DEVICE_ATTR_RO(name); @@ -312,47 +319,53 @@ static struct attribute *fpga_bridge_attrs[] = { ATTRIBUTE_GROUPS(fpga_bridge); /** - * fpga_bridge_create - create and initialize a struct fpga_bridge + * __fpga_bridge_register - create and register an FPGA Bridge device * @parent: FPGA bridge device from pdev * @name: FPGA bridge name * @br_ops: pointer to structure of fpga bridge ops * @priv: FPGA bridge private data + * @owner: owner module containing the br_ops * - * The caller of this function is responsible for freeing the bridge with - * fpga_bridge_free(). Using devm_fpga_bridge_create() instead is recommended. 
- * - * Return: struct fpga_bridge or NULL + * Return: struct fpga_bridge pointer or ERR_PTR() */ -struct fpga_bridge *fpga_bridge_create(struct device *parent, const char *name, - const struct fpga_bridge_ops *br_ops, - void *priv) +struct fpga_bridge * +__fpga_bridge_register(struct device *parent, const char *name, + const struct fpga_bridge_ops *br_ops, + void *priv, struct module *owner) { struct fpga_bridge *bridge; int id, ret; + if (!br_ops) { + dev_err(parent, "Attempt to register without fpga_bridge_ops\n"); + return ERR_PTR(-EINVAL); + } + if (!name || !strlen(name)) { dev_err(parent, "Attempt to register with no name!\n"); - return NULL; + return ERR_PTR(-EINVAL); } bridge = kzalloc(sizeof(*bridge), GFP_KERNEL); if (!bridge) - return NULL; + return ERR_PTR(-ENOMEM); - id = ida_simple_get(&fpga_bridge_ida, 0, 0, GFP_KERNEL); - if (id < 0) + id = ida_alloc(&fpga_bridge_ida, GFP_KERNEL); + if (id < 0) { + ret = id; goto error_kfree; + } mutex_init(&bridge->mutex); INIT_LIST_HEAD(&bridge->node); bridge->name = name; bridge->br_ops = br_ops; + bridge->br_ops_owner = owner; bridge->priv = priv; - device_initialize(&bridge->dev); bridge->dev.groups = br_ops->groups; - bridge->dev.class = fpga_bridge_class; + bridge->dev.class = &fpga_bridge_class; bridge->dev.parent = parent; bridge->dev.of_node = parent->of_node; bridge->dev.id = id; @@ -361,97 +374,24 @@ struct fpga_bridge *fpga_bridge_create(struct device *parent, const char *name, if (ret) goto error_device; + ret = device_register(&bridge->dev); + if (ret) { + put_device(&bridge->dev); + return ERR_PTR(ret); + } + + of_platform_populate(bridge->dev.of_node, NULL, NULL, &bridge->dev); + return bridge; error_device: - ida_simple_remove(&fpga_bridge_ida, id); + ida_free(&fpga_bridge_ida, id); error_kfree: kfree(bridge); - return NULL; -} -EXPORT_SYMBOL_GPL(fpga_bridge_create); - -/** - * fpga_bridge_free - free an fpga bridge created by fpga_bridge_create() - * @bridge: FPGA bridge struct - */ -void fpga_bridge_free(struct fpga_bridge *bridge) -{ - ida_simple_remove(&fpga_bridge_ida, bridge->dev.id); - kfree(bridge); -} -EXPORT_SYMBOL_GPL(fpga_bridge_free); - -static void devm_fpga_bridge_release(struct device *dev, void *res) -{ - struct fpga_bridge *bridge = *(struct fpga_bridge **)res; - - fpga_bridge_free(bridge); -} - -/** - * devm_fpga_bridge_create - create and init a managed struct fpga_bridge - * @parent: FPGA bridge device from pdev - * @name: FPGA bridge name - * @br_ops: pointer to structure of fpga bridge ops - * @priv: FPGA bridge private data - * - * This function is intended for use in an FPGA bridge driver's probe function. - * After the bridge driver creates the struct with devm_fpga_bridge_create(), it - * should register the bridge with fpga_bridge_register(). The bridge driver's - * remove function should call fpga_bridge_unregister(). The bridge struct - * allocated with this function will be freed automatically on driver detach. - * This includes the case of a probe function returning error before calling - * fpga_bridge_register(), the struct will still get cleaned up. 
- * - * Return: struct fpga_bridge or NULL - */ -struct fpga_bridge -*devm_fpga_bridge_create(struct device *parent, const char *name, - const struct fpga_bridge_ops *br_ops, void *priv) -{ - struct fpga_bridge **ptr, *bridge; - - ptr = devres_alloc(devm_fpga_bridge_release, sizeof(*ptr), GFP_KERNEL); - if (!ptr) - return NULL; - - bridge = fpga_bridge_create(parent, name, br_ops, priv); - if (!bridge) { - devres_free(ptr); - } else { - *ptr = bridge; - devres_add(parent, ptr); - } - - return bridge; -} -EXPORT_SYMBOL_GPL(devm_fpga_bridge_create); - -/** - * fpga_bridge_register - register an FPGA bridge - * - * @bridge: FPGA bridge struct - * - * Return: 0 for success, error code otherwise. - */ -int fpga_bridge_register(struct fpga_bridge *bridge) -{ - struct device *dev = &bridge->dev; - int ret; - - ret = device_add(dev); - if (ret) - return ret; - - of_platform_populate(dev->of_node, NULL, NULL, dev); - - dev_info(dev->parent, "fpga bridge [%s] registered\n", bridge->name); - - return 0; + return ERR_PTR(ret); } -EXPORT_SYMBOL_GPL(fpga_bridge_register); +EXPORT_SYMBOL_GPL(__fpga_bridge_register); /** * fpga_bridge_unregister - unregister an FPGA bridge @@ -466,7 +406,7 @@ void fpga_bridge_unregister(struct fpga_bridge *bridge) * If the low level driver provides a method for putting bridge into * a desired state upon unregister, do it. */ - if (bridge->br_ops && bridge->br_ops->fpga_bridge_remove) + if (bridge->br_ops->fpga_bridge_remove) bridge->br_ops->fpga_bridge_remove(bridge); device_unregister(&bridge->dev); @@ -475,23 +415,26 @@ EXPORT_SYMBOL_GPL(fpga_bridge_unregister); static void fpga_bridge_dev_release(struct device *dev) { + struct fpga_bridge *bridge = to_fpga_bridge(dev); + + ida_free(&fpga_bridge_ida, bridge->dev.id); + kfree(bridge); } +static const struct class fpga_bridge_class = { + .name = "fpga_bridge", + .dev_groups = fpga_bridge_groups, + .dev_release = fpga_bridge_dev_release, +}; + static int __init fpga_bridge_dev_init(void) { - fpga_bridge_class = class_create(THIS_MODULE, "fpga_bridge"); - if (IS_ERR(fpga_bridge_class)) - return PTR_ERR(fpga_bridge_class); - - fpga_bridge_class->dev_groups = fpga_bridge_groups; - fpga_bridge_class->dev_release = fpga_bridge_dev_release; - - return 0; + return class_register(&fpga_bridge_class); } static void __exit fpga_bridge_dev_exit(void) { - class_destroy(fpga_bridge_class); + class_unregister(&fpga_bridge_class); ida_destroy(&fpga_bridge_ida); } diff --git a/drivers/fpga/fpga-mgr.c b/drivers/fpga/fpga-mgr.c index ecb4c3c795fa..0f4035b089a2 100644 --- a/drivers/fpga/fpga-mgr.c +++ b/drivers/fpga/fpga-mgr.c @@ -19,12 +19,87 @@ #include <linux/highmem.h> static DEFINE_IDA(fpga_mgr_ida); -static struct class *fpga_mgr_class; +static const struct class fpga_mgr_class; struct fpga_mgr_devres { struct fpga_manager *mgr; }; +static inline void fpga_mgr_fpga_remove(struct fpga_manager *mgr) +{ + if (mgr->mops->fpga_remove) + mgr->mops->fpga_remove(mgr); +} + +static inline enum fpga_mgr_states fpga_mgr_state(struct fpga_manager *mgr) +{ + if (mgr->mops->state) + return mgr->mops->state(mgr); + return FPGA_MGR_STATE_UNKNOWN; +} + +static inline u64 fpga_mgr_status(struct fpga_manager *mgr) +{ + if (mgr->mops->status) + return mgr->mops->status(mgr); + return 0; +} + +static inline int fpga_mgr_write(struct fpga_manager *mgr, const char *buf, size_t count) +{ + if (mgr->mops->write) + return mgr->mops->write(mgr, buf, count); + return -EOPNOTSUPP; +} + +/* + * After all the FPGA image has been written, do the device specific steps 
to + * finish and set the FPGA into operating mode. + */ +static inline int fpga_mgr_write_complete(struct fpga_manager *mgr, + struct fpga_image_info *info) +{ + int ret = 0; + + mgr->state = FPGA_MGR_STATE_WRITE_COMPLETE; + if (mgr->mops->write_complete) + ret = mgr->mops->write_complete(mgr, info); + if (ret) { + dev_err(&mgr->dev, "Error after writing image data to FPGA\n"); + mgr->state = FPGA_MGR_STATE_WRITE_COMPLETE_ERR; + return ret; + } + mgr->state = FPGA_MGR_STATE_OPERATING; + + return 0; +} + +static inline int fpga_mgr_parse_header(struct fpga_manager *mgr, + struct fpga_image_info *info, + const char *buf, size_t count) +{ + if (mgr->mops->parse_header) + return mgr->mops->parse_header(mgr, info, buf, count); + return 0; +} + +static inline int fpga_mgr_write_init(struct fpga_manager *mgr, + struct fpga_image_info *info, + const char *buf, size_t count) +{ + if (mgr->mops->write_init) + return mgr->mops->write_init(mgr, info, buf, count); + return 0; +} + +static inline int fpga_mgr_write_sg(struct fpga_manager *mgr, + struct sg_table *sgt) +{ + if (mgr->mops->write_sg) + return mgr->mops->write_sg(mgr, sgt); + return -EOPNOTSUPP; +} + /** * fpga_image_info_alloc - Allocate an FPGA image info struct * @dev: owning device @@ -70,23 +145,141 @@ void fpga_image_info_free(struct fpga_image_info *info) EXPORT_SYMBOL_GPL(fpga_image_info_free); /* - * Call the low level driver's write_init function. This will do the + * Call the low level driver's parse_header function with entire FPGA image + * buffer on the input. This will set info->header_size and info->data_size. + */ +static int fpga_mgr_parse_header_mapped(struct fpga_manager *mgr, + struct fpga_image_info *info, + const char *buf, size_t count) +{ + int ret; + + mgr->state = FPGA_MGR_STATE_PARSE_HEADER; + ret = fpga_mgr_parse_header(mgr, info, buf, count); + + if (info->header_size + info->data_size > count) { + dev_err(&mgr->dev, "Bitstream data outruns FPGA image\n"); + ret = -EINVAL; + } + + if (ret) { + dev_err(&mgr->dev, "Error while parsing FPGA image header\n"); + mgr->state = FPGA_MGR_STATE_PARSE_HEADER_ERR; + } + + return ret; +} + +/* + * Call the low level driver's parse_header function with first fragment of + * scattered FPGA image on the input. If header fits first fragment, + * parse_header will set info->header_size and info->data_size. If it is not, + * parse_header will set desired size to info->header_size and -EAGAIN will be + * returned. + */ +static int fpga_mgr_parse_header_sg_first(struct fpga_manager *mgr, + struct fpga_image_info *info, + struct sg_table *sgt) +{ + struct sg_mapping_iter miter; + int ret; + + mgr->state = FPGA_MGR_STATE_PARSE_HEADER; + + sg_miter_start(&miter, sgt->sgl, sgt->nents, SG_MITER_FROM_SG); + if (sg_miter_next(&miter) && + miter.length >= info->header_size) + ret = fpga_mgr_parse_header(mgr, info, miter.addr, miter.length); + else + ret = -EAGAIN; + sg_miter_stop(&miter); + + if (ret && ret != -EAGAIN) { + dev_err(&mgr->dev, "Error while parsing FPGA image header\n"); + mgr->state = FPGA_MGR_STATE_PARSE_HEADER_ERR; + } + + return ret; +} + +/* + * Copy scattered FPGA image fragments to temporary buffer and call the + * low level driver's parse_header function. This should be called after + * fpga_mgr_parse_header_sg_first() returned -EAGAIN. In case of success, + * pointer to the newly allocated image header copy will be returned and + * its size will be set into *ret_size. Returned buffer needs to be freed. 
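The parse_header() op is what drives the header handling above: the core passes the driver whatever bytes it currently has, and the driver either fills in info->header_size and info->data_size or requests more data with -EAGAIN. A sketch for a hypothetical image format with a fixed 16-byte header carrying a big-endian payload length; struct my_img_hdr and MY_IMG_MAGIC are invented:

struct my_img_hdr {
        __be32 magic;
        __be32 data_len;
        u8 reserved[8];
} __packed;

#define MY_IMG_MAGIC    0x46504741      /* hypothetical */

static int my_mgr_parse_header(struct fpga_manager *mgr,
                               struct fpga_image_info *info,
                               const char *buf, size_t count)
{
        const struct my_img_hdr *hdr = (const void *)buf;

        /* Not enough bytes yet: tell the core how much header is needed */
        if (count < sizeof(*hdr)) {
                info->header_size = sizeof(*hdr);
                return -EAGAIN;
        }

        if (be32_to_cpu(hdr->magic) != MY_IMG_MAGIC)
                return -EINVAL;

        info->header_size = sizeof(*hdr);
        info->data_size = be32_to_cpu(hdr->data_len);

        return 0;
}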
+ */ +static void *fpga_mgr_parse_header_sg(struct fpga_manager *mgr, + struct fpga_image_info *info, + struct sg_table *sgt, size_t *ret_size) +{ + size_t len, new_header_size, header_size = 0; + char *new_buf, *buf = NULL; + int ret; + + do { + new_header_size = info->header_size; + if (new_header_size <= header_size) { + dev_err(&mgr->dev, "Requested invalid header size\n"); + ret = -EFAULT; + break; + } + + new_buf = krealloc(buf, new_header_size, GFP_KERNEL); + if (!new_buf) { + ret = -ENOMEM; + break; + } + + buf = new_buf; + + len = sg_pcopy_to_buffer(sgt->sgl, sgt->nents, + buf + header_size, + new_header_size - header_size, + header_size); + if (len != new_header_size - header_size) { + ret = -EFAULT; + break; + } + + header_size = new_header_size; + ret = fpga_mgr_parse_header(mgr, info, buf, header_size); + } while (ret == -EAGAIN); + + if (ret) { + dev_err(&mgr->dev, "Error while parsing FPGA image header\n"); + mgr->state = FPGA_MGR_STATE_PARSE_HEADER_ERR; + kfree(buf); + buf = ERR_PTR(ret); + } + + *ret_size = header_size; + + return buf; +} + +/* + * Call the low level driver's write_init function. This will do the * device-specific things to get the FPGA into the state where it is ready to - * receive an FPGA image. The low level driver only gets to see the first - * initial_header_size bytes in the buffer. + * receive an FPGA image. The low level driver gets to see at least first + * info->header_size bytes in the buffer. If info->header_size is 0, + * write_init will not get any bytes of image buffer. */ static int fpga_mgr_write_init_buf(struct fpga_manager *mgr, struct fpga_image_info *info, const char *buf, size_t count) { + size_t header_size = info->header_size; int ret; mgr->state = FPGA_MGR_STATE_WRITE_INIT; - if (!mgr->mops->initial_header_size) - ret = mgr->mops->write_init(mgr, info, NULL, 0); + + if (header_size > count) + ret = -EINVAL; + else if (!header_size) + ret = fpga_mgr_write_init(mgr, info, NULL, 0); else - ret = mgr->mops->write_init( - mgr, info, buf, min(mgr->mops->initial_header_size, count)); + ret = fpga_mgr_write_init(mgr, info, buf, count); if (ret) { dev_err(&mgr->dev, "Error preparing FPGA for writing\n"); @@ -97,39 +290,50 @@ static int fpga_mgr_write_init_buf(struct fpga_manager *mgr, return 0; } -static int fpga_mgr_write_init_sg(struct fpga_manager *mgr, - struct fpga_image_info *info, - struct sg_table *sgt) +static int fpga_mgr_prepare_sg(struct fpga_manager *mgr, + struct fpga_image_info *info, + struct sg_table *sgt) { struct sg_mapping_iter miter; size_t len; char *buf; int ret; - if (!mgr->mops->initial_header_size) + /* Short path. Low level driver don't care about image header. */ + if (!mgr->mops->initial_header_size && !mgr->mops->parse_header) return fpga_mgr_write_init_buf(mgr, info, NULL, 0); /* * First try to use miter to map the first fragment to access the * header, this is the typical path. 
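Under this contract write_init() can rely on seeing at least info->header_size bytes of the image (or none at all when header_size is 0). A minimal sketch reusing the hypothetical format above; my_mgr_priv and my_mgr_start_config() are placeholders for driver-specific setup:

static int my_mgr_write_init(struct fpga_manager *mgr,
                             struct fpga_image_info *info,
                             const char *buf, size_t count)
{
        struct my_mgr_priv *priv = mgr->priv;

        if (info->flags & FPGA_MGR_PARTIAL_RECONFIG)
                return -EINVAL;         /* full reconfiguration only */

        /* count >= info->header_size here, so header fields may be re-checked */
        return my_mgr_start_config(priv, info->data_size);
}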
*/ - sg_miter_start(&miter, sgt->sgl, sgt->nents, SG_MITER_FROM_SG); - if (sg_miter_next(&miter) && - miter.length >= mgr->mops->initial_header_size) { - ret = fpga_mgr_write_init_buf(mgr, info, miter.addr, - miter.length); + ret = fpga_mgr_parse_header_sg_first(mgr, info, sgt); + /* If 0, header fits first fragment, call write_init on it */ + if (!ret) { + sg_miter_start(&miter, sgt->sgl, sgt->nents, SG_MITER_FROM_SG); + if (sg_miter_next(&miter)) { + ret = fpga_mgr_write_init_buf(mgr, info, miter.addr, + miter.length); + sg_miter_stop(&miter); + return ret; + } sg_miter_stop(&miter); + /* + * If -EAGAIN, more sg buffer is needed, + * otherwise an error has occurred. + */ + } else if (ret != -EAGAIN) { return ret; } - sg_miter_stop(&miter); - /* Otherwise copy the fragments into temporary memory. */ - buf = kmalloc(mgr->mops->initial_header_size, GFP_KERNEL); - if (!buf) - return -ENOMEM; + /* + * Copy the fragments into temporary memory. + * Copying is done inside fpga_mgr_parse_header_sg(). + */ + buf = fpga_mgr_parse_header_sg(mgr, info, sgt, &len); + if (IS_ERR(buf)) + return PTR_ERR(buf); - len = sg_copy_to_buffer(sgt->sgl, sgt->nents, buf, - mgr->mops->initial_header_size); ret = fpga_mgr_write_init_buf(mgr, info, buf, len); kfree(buf); @@ -137,27 +341,6 @@ static int fpga_mgr_write_init_sg(struct fpga_manager *mgr, return ret; } -/* - * After all the FPGA image has been written, do the device specific steps to - * finish and set the FPGA into operating mode. - */ -static int fpga_mgr_write_complete(struct fpga_manager *mgr, - struct fpga_image_info *info) -{ - int ret; - - mgr->state = FPGA_MGR_STATE_WRITE_COMPLETE; - ret = mgr->mops->write_complete(mgr, info); - if (ret) { - dev_err(&mgr->dev, "Error after writing image data to FPGA\n"); - mgr->state = FPGA_MGR_STATE_WRITE_COMPLETE_ERR; - return ret; - } - mgr->state = FPGA_MGR_STATE_OPERATING; - - return 0; -} - /** * fpga_mgr_buf_load_sg - load fpga from image in buffer from a scatter list * @mgr: fpga manager @@ -181,26 +364,44 @@ static int fpga_mgr_buf_load_sg(struct fpga_manager *mgr, { int ret; - ret = fpga_mgr_write_init_sg(mgr, info, sgt); + ret = fpga_mgr_prepare_sg(mgr, info, sgt); if (ret) return ret; /* Write the FPGA image to the FPGA. 
*/ mgr->state = FPGA_MGR_STATE_WRITE; if (mgr->mops->write_sg) { - ret = mgr->mops->write_sg(mgr, sgt); + ret = fpga_mgr_write_sg(mgr, sgt); } else { + size_t length, count = 0, data_size = info->data_size; struct sg_mapping_iter miter; sg_miter_start(&miter, sgt->sgl, sgt->nents, SG_MITER_FROM_SG); + + if (mgr->mops->skip_header && + !sg_miter_skip(&miter, info->header_size)) { + ret = -EINVAL; + goto out; + } + while (sg_miter_next(&miter)) { - ret = mgr->mops->write(mgr, miter.addr, miter.length); + if (data_size) + length = min(miter.length, data_size - count); + else + length = miter.length; + + ret = fpga_mgr_write(mgr, miter.addr, length); if (ret) break; + + count += length; + if (data_size && count >= data_size) + break; } sg_miter_stop(&miter); } +out: if (ret) { dev_err(&mgr->dev, "Error while writing image data to FPGA\n"); mgr->state = FPGA_MGR_STATE_WRITE_ERR; @@ -216,15 +417,27 @@ static int fpga_mgr_buf_load_mapped(struct fpga_manager *mgr, { int ret; + ret = fpga_mgr_parse_header_mapped(mgr, info, buf, count); + if (ret) + return ret; + ret = fpga_mgr_write_init_buf(mgr, info, buf, count); if (ret) return ret; + if (mgr->mops->skip_header) { + buf += info->header_size; + count -= info->header_size; + } + + if (info->data_size) + count = info->data_size; + /* * Write the FPGA image to the FPGA. */ mgr->state = FPGA_MGR_STATE_WRITE; - ret = mgr->mops->write(mgr, buf, count); + ret = fpga_mgr_write(mgr, buf, count); if (ret) { dev_err(&mgr->dev, "Error while writing image data to FPGA\n"); mgr->state = FPGA_MGR_STATE_WRITE_ERR; @@ -358,6 +571,8 @@ static int fpga_mgr_firmware_load(struct fpga_manager *mgr, */ int fpga_mgr_load(struct fpga_manager *mgr, struct fpga_image_info *info) { + info->header_size = mgr->mops->initial_header_size; + if (info->sgt) return fpga_mgr_buf_load_sg(mgr, info, info->sgt); if (info->buf && info->count) @@ -378,6 +593,10 @@ static const char * const state_str[] = { [FPGA_MGR_STATE_FIRMWARE_REQ] = "firmware request", [FPGA_MGR_STATE_FIRMWARE_REQ_ERR] = "firmware request error", + /* Parse FPGA image header */ + [FPGA_MGR_STATE_PARSE_HEADER] = "parse header", + [FPGA_MGR_STATE_PARSE_HEADER_ERR] = "parse header error", + /* Preparing FPGA to receive image */ [FPGA_MGR_STATE_WRITE_INIT] = "write init", [FPGA_MGR_STATE_WRITE_INIT_ERR] = "write init error", @@ -417,10 +636,7 @@ static ssize_t status_show(struct device *dev, u64 status; int len = 0; - if (!mgr->mops->status) - return -ENOENT; - - status = mgr->mops->status(mgr); + status = fpga_mgr_status(mgr); if (status & FPGA_MGR_STATUS_OPERATION_ERR) len += sprintf(buf + len, "reconfig operation error\n"); @@ -448,20 +664,16 @@ static struct attribute *fpga_mgr_attrs[] = { }; ATTRIBUTE_GROUPS(fpga_mgr); -static struct fpga_manager *__fpga_mgr_get(struct device *dev) +static struct fpga_manager *__fpga_mgr_get(struct device *mgr_dev) { struct fpga_manager *mgr; - mgr = to_fpga_manager(dev); + mgr = to_fpga_manager(mgr_dev); - if (!try_module_get(dev->parent->driver->owner)) - goto err_dev; + if (!try_module_get(mgr->mops_owner)) + mgr = ERR_PTR(-ENODEV); return mgr; - -err_dev: - put_device(dev); - return ERR_PTR(-ENODEV); } static int fpga_mgr_dev_match(struct device *dev, const void *data) @@ -477,12 +689,18 @@ static int fpga_mgr_dev_match(struct device *dev, const void *data) */ struct fpga_manager *fpga_mgr_get(struct device *dev) { - struct device *mgr_dev = class_find_device(fpga_mgr_class, NULL, dev, - fpga_mgr_dev_match); + struct fpga_manager *mgr; + struct device *mgr_dev; + + mgr_dev = 
class_find_device(&fpga_mgr_class, NULL, dev, fpga_mgr_dev_match); if (!mgr_dev) return ERR_PTR(-ENODEV); - return __fpga_mgr_get(mgr_dev); + mgr = __fpga_mgr_get(mgr_dev); + if (IS_ERR(mgr)) + put_device(mgr_dev); + + return mgr; } EXPORT_SYMBOL_GPL(fpga_mgr_get); @@ -495,13 +713,18 @@ EXPORT_SYMBOL_GPL(fpga_mgr_get); */ struct fpga_manager *of_fpga_mgr_get(struct device_node *node) { - struct device *dev; + struct fpga_manager *mgr; + struct device *mgr_dev; - dev = class_find_device_by_of_node(fpga_mgr_class, node); - if (!dev) + mgr_dev = class_find_device_by_of_node(&fpga_mgr_class, node); + if (!mgr_dev) return ERR_PTR(-ENODEV); - return __fpga_mgr_get(dev); + mgr = __fpga_mgr_get(mgr_dev); + if (IS_ERR(mgr)) + put_device(mgr_dev); + + return mgr; } EXPORT_SYMBOL_GPL(of_fpga_mgr_get); @@ -511,7 +734,7 @@ EXPORT_SYMBOL_GPL(of_fpga_mgr_get); */ void fpga_mgr_put(struct fpga_manager *mgr) { - module_put(mgr->dev.parent->driver->owner); + module_put(mgr->mops_owner); put_device(&mgr->dev); } EXPORT_SYMBOL_GPL(fpga_mgr_put); @@ -550,52 +773,54 @@ void fpga_mgr_unlock(struct fpga_manager *mgr) EXPORT_SYMBOL_GPL(fpga_mgr_unlock); /** - * fpga_mgr_create - create and initialize an FPGA manager struct + * __fpga_mgr_register_full - create and register an FPGA Manager device * @parent: fpga manager device from pdev - * @name: fpga manager name - * @mops: pointer to structure of fpga manager ops - * @priv: fpga manager private data + * @info: parameters for fpga manager + * @owner: owner module containing the ops * - * The caller of this function is responsible for freeing the struct with - * fpga_mgr_free(). Using devm_fpga_mgr_create() instead is recommended. + * The caller of this function is responsible for calling fpga_mgr_unregister(). + * Using devm_fpga_mgr_register_full() instead is recommended. 
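On the consumer side the new mops_owner refcounting stays hidden behind the usual get/lock/load/unlock/put sequence. A hedged sketch of programming a manager found by device-tree node; the function name, the node argument and "my_image.bit" are placeholders:

static int my_load_image(struct device *dev, struct device_node *mgr_node)
{
        struct fpga_image_info *info;
        struct fpga_manager *mgr;
        int ret;

        mgr = of_fpga_mgr_get(mgr_node);        /* takes the mops_owner reference */
        if (IS_ERR(mgr))
                return PTR_ERR(mgr);

        info = fpga_image_info_alloc(dev);
        if (!info) {
                ret = -ENOMEM;
                goto put_mgr;
        }
        info->firmware_name = devm_kstrdup(dev, "my_image.bit", GFP_KERNEL);

        ret = fpga_mgr_lock(mgr);
        if (ret)
                goto free_info;

        ret = fpga_mgr_load(mgr, info);         /* runs parse_header/write_init/write */

        fpga_mgr_unlock(mgr);
free_info:
        fpga_image_info_free(info);
put_mgr:
        fpga_mgr_put(mgr);                      /* drops the mops_owner reference */
        return ret;
}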
* - * Return: pointer to struct fpga_manager or NULL + * Return: pointer to struct fpga_manager pointer or ERR_PTR() */ -struct fpga_manager *fpga_mgr_create(struct device *parent, const char *name, - const struct fpga_manager_ops *mops, - void *priv) +struct fpga_manager * +__fpga_mgr_register_full(struct device *parent, const struct fpga_manager_info *info, + struct module *owner) { + const struct fpga_manager_ops *mops = info->mops; struct fpga_manager *mgr; int id, ret; - if (!mops || !mops->write_complete || !mops->state || - !mops->write_init || (!mops->write && !mops->write_sg) || - (mops->write && mops->write_sg)) { + if (!mops) { dev_err(parent, "Attempt to register without fpga_manager_ops\n"); - return NULL; + return ERR_PTR(-EINVAL); } - if (!name || !strlen(name)) { + if (!info->name || !strlen(info->name)) { dev_err(parent, "Attempt to register with no name!\n"); - return NULL; + return ERR_PTR(-EINVAL); } mgr = kzalloc(sizeof(*mgr), GFP_KERNEL); if (!mgr) - return NULL; + return ERR_PTR(-ENOMEM); - id = ida_simple_get(&fpga_mgr_ida, 0, 0, GFP_KERNEL); - if (id < 0) + id = ida_alloc(&fpga_mgr_ida, GFP_KERNEL); + if (id < 0) { + ret = id; goto error_kfree; + } mutex_init(&mgr->ref_mutex); - mgr->name = name; - mgr->mops = mops; - mgr->priv = priv; + mgr->mops_owner = owner; - device_initialize(&mgr->dev); - mgr->dev.class = fpga_mgr_class; + mgr->name = info->name; + mgr->mops = info->mops; + mgr->priv = info->priv; + mgr->compat_id = info->compat_id; + + mgr->dev.class = &fpga_mgr_class; mgr->dev.groups = mops->groups; mgr->dev.parent = parent; mgr->dev.of_node = parent->of_node; @@ -605,105 +830,59 @@ struct fpga_manager *fpga_mgr_create(struct device *parent, const char *name, if (ret) goto error_device; + /* + * Initialize framework state by requesting low level driver read state + * from device. FPGA may be in reset mode or may have been programmed + * by bootloader or EEPROM. + */ + mgr->state = fpga_mgr_state(mgr); + + ret = device_register(&mgr->dev); + if (ret) { + put_device(&mgr->dev); + return ERR_PTR(ret); + } + return mgr; error_device: - ida_simple_remove(&fpga_mgr_ida, id); + ida_free(&fpga_mgr_ida, id); error_kfree: kfree(mgr); - return NULL; -} -EXPORT_SYMBOL_GPL(fpga_mgr_create); - -/** - * fpga_mgr_free - free an FPGA manager created with fpga_mgr_create() - * @mgr: fpga manager struct - */ -void fpga_mgr_free(struct fpga_manager *mgr) -{ - ida_simple_remove(&fpga_mgr_ida, mgr->dev.id); - kfree(mgr); -} -EXPORT_SYMBOL_GPL(fpga_mgr_free); - -static void devm_fpga_mgr_release(struct device *dev, void *res) -{ - struct fpga_mgr_devres *dr = res; - - fpga_mgr_free(dr->mgr); + return ERR_PTR(ret); } +EXPORT_SYMBOL_GPL(__fpga_mgr_register_full); /** - * devm_fpga_mgr_create - create and initialize a managed FPGA manager struct + * __fpga_mgr_register - create and register an FPGA Manager device * @parent: fpga manager device from pdev * @name: fpga manager name * @mops: pointer to structure of fpga manager ops * @priv: fpga manager private data + * @owner: owner module containing the ops * - * This function is intended for use in an FPGA manager driver's probe function. - * After the manager driver creates the manager struct with - * devm_fpga_mgr_create(), it should register it with fpga_mgr_register(). The - * manager driver's remove function should call fpga_mgr_unregister(). The - * manager struct allocated with this function will be freed automatically on - * driver detach. 
This includes the case of a probe function returning error - * before calling fpga_mgr_register(), the struct will still get cleaned up. + * The caller of this function is responsible for calling fpga_mgr_unregister(). + * Using devm_fpga_mgr_register() instead is recommended. This simple + * version of the register function should be sufficient for most users. The + * fpga_mgr_register_full() function is available for users that need to pass + * additional, optional parameters. * - * Return: pointer to struct fpga_manager or NULL + * Return: pointer to struct fpga_manager pointer or ERR_PTR() */ -struct fpga_manager *devm_fpga_mgr_create(struct device *parent, const char *name, - const struct fpga_manager_ops *mops, - void *priv) +struct fpga_manager * +__fpga_mgr_register(struct device *parent, const char *name, + const struct fpga_manager_ops *mops, void *priv, struct module *owner) { - struct fpga_mgr_devres *dr; - - dr = devres_alloc(devm_fpga_mgr_release, sizeof(*dr), GFP_KERNEL); - if (!dr) - return NULL; - - dr->mgr = fpga_mgr_create(parent, name, mops, priv); - if (!dr->mgr) { - devres_free(dr); - return NULL; - } + struct fpga_manager_info info = { 0 }; - devres_add(parent, dr); + info.name = name; + info.mops = mops; + info.priv = priv; - return dr->mgr; + return __fpga_mgr_register_full(parent, &info, owner); } -EXPORT_SYMBOL_GPL(devm_fpga_mgr_create); - -/** - * fpga_mgr_register - register an FPGA manager - * @mgr: fpga manager struct - * - * Return: 0 on success, negative error code otherwise. - */ -int fpga_mgr_register(struct fpga_manager *mgr) -{ - int ret; - - /* - * Initialize framework state by requesting low level driver read state - * from device. FPGA may be in reset mode or may have been programmed - * by bootloader or EEPROM. - */ - mgr->state = mgr->mops->state(mgr); - - ret = device_add(&mgr->dev); - if (ret) - goto error_device; - - dev_info(&mgr->dev, "%s registered\n", mgr->name); - - return 0; - -error_device: - ida_simple_remove(&fpga_mgr_ida, mgr->dev.id); - - return ret; -} -EXPORT_SYMBOL_GPL(fpga_mgr_register); +EXPORT_SYMBOL_GPL(__fpga_mgr_register); /** * fpga_mgr_unregister - unregister an FPGA manager @@ -719,21 +898,12 @@ void fpga_mgr_unregister(struct fpga_manager *mgr) * If the low level driver provides a method for putting fpga into * a desired state upon unregister, do it. */ - if (mgr->mops->fpga_remove) - mgr->mops->fpga_remove(mgr); + fpga_mgr_fpga_remove(mgr); device_unregister(&mgr->dev); } EXPORT_SYMBOL_GPL(fpga_mgr_unregister); -static int fpga_mgr_devres_match(struct device *dev, void *res, - void *match_data) -{ - struct fpga_mgr_devres *dr = res; - - return match_data == dr->mgr; -} - static void devm_fpga_mgr_unregister(struct device *dev, void *res) { struct fpga_mgr_devres *dr = res; @@ -742,64 +912,93 @@ static void devm_fpga_mgr_unregister(struct device *dev, void *res) } /** - * devm_fpga_mgr_register - resource managed variant of fpga_mgr_register() - * @dev: managing device for this FPGA manager - * @mgr: fpga manager struct + * __devm_fpga_mgr_register_full - resource managed variant of fpga_mgr_register() + * @parent: fpga manager device from pdev + * @info: parameters for fpga manager + * @owner: owner module containing the ops * - * This is the devres variant of fpga_mgr_register() for which the unregister + * Return: fpga manager pointer on success, negative error code otherwise. 
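On the provider side, everything that used to be split across create and register is now a single call, with the info structure keeping the less common parameters optional. A sketch using the full-info devm variant, assuming the header adds wrapper macros that supply THIS_MODULE; my_mgr_ops, my_mgr_priv and the compat_id are placeholders:

static int my_mgr_probe(struct platform_device *pdev)
{
        struct fpga_manager_info info = { 0 };
        struct my_mgr_priv *priv;
        struct fpga_manager *mgr;

        priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        info.name = "My FPGA Manager";
        info.mops = &my_mgr_ops;
        info.priv = priv;
        /* info.compat_id = &my_compat_id; optional */

        mgr = devm_fpga_mgr_register_full(&pdev->dev, &info);

        return PTR_ERR_OR_ZERO(mgr);
}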
+ * + * This is the devres variant of fpga_mgr_register_full() for which the unregister * function will be called automatically when the managing device is detached. */ -int devm_fpga_mgr_register(struct device *dev, struct fpga_manager *mgr) +struct fpga_manager * +__devm_fpga_mgr_register_full(struct device *parent, const struct fpga_manager_info *info, + struct module *owner) { struct fpga_mgr_devres *dr; - int ret; - - /* - * Make sure that the struct fpga_manager * that is passed in is - * managed itself. - */ - if (WARN_ON(!devres_find(dev, devm_fpga_mgr_release, - fpga_mgr_devres_match, mgr))) - return -EINVAL; + struct fpga_manager *mgr; dr = devres_alloc(devm_fpga_mgr_unregister, sizeof(*dr), GFP_KERNEL); if (!dr) - return -ENOMEM; + return ERR_PTR(-ENOMEM); - ret = fpga_mgr_register(mgr); - if (ret) { + mgr = __fpga_mgr_register_full(parent, info, owner); + if (IS_ERR(mgr)) { devres_free(dr); - return ret; + return mgr; } dr->mgr = mgr; - devres_add(dev, dr); + devres_add(parent, dr); - return 0; + return mgr; } -EXPORT_SYMBOL_GPL(devm_fpga_mgr_register); +EXPORT_SYMBOL_GPL(__devm_fpga_mgr_register_full); + +/** + * __devm_fpga_mgr_register - resource managed variant of fpga_mgr_register() + * @parent: fpga manager device from pdev + * @name: fpga manager name + * @mops: pointer to structure of fpga manager ops + * @priv: fpga manager private data + * @owner: owner module containing the ops + * + * Return: fpga manager pointer on success, negative error code otherwise. + * + * This is the devres variant of fpga_mgr_register() for which the + * unregister function will be called automatically when the managing + * device is detached. + */ +struct fpga_manager * +__devm_fpga_mgr_register(struct device *parent, const char *name, + const struct fpga_manager_ops *mops, void *priv, + struct module *owner) +{ + struct fpga_manager_info info = { 0 }; + + info.name = name; + info.mops = mops; + info.priv = priv; + + return __devm_fpga_mgr_register_full(parent, &info, owner); +} +EXPORT_SYMBOL_GPL(__devm_fpga_mgr_register); static void fpga_mgr_dev_release(struct device *dev) { + struct fpga_manager *mgr = to_fpga_manager(dev); + + ida_free(&fpga_mgr_ida, mgr->dev.id); + kfree(mgr); } +static const struct class fpga_mgr_class = { + .name = "fpga_manager", + .dev_groups = fpga_mgr_groups, + .dev_release = fpga_mgr_dev_release, +}; + static int __init fpga_mgr_class_init(void) { pr_info("FPGA manager framework\n"); - fpga_mgr_class = class_create(THIS_MODULE, "fpga_manager"); - if (IS_ERR(fpga_mgr_class)) - return PTR_ERR(fpga_mgr_class); - - fpga_mgr_class->dev_groups = fpga_mgr_groups; - fpga_mgr_class->dev_release = fpga_mgr_dev_release; - - return 0; + return class_register(&fpga_mgr_class); } static void __exit fpga_mgr_class_exit(void) { - class_destroy(fpga_mgr_class); + class_unregister(&fpga_mgr_class); ida_destroy(&fpga_mgr_ida); } diff --git a/drivers/fpga/fpga-region.c b/drivers/fpga/fpga-region.c index a4838715221f..753cd142503e 100644 --- a/drivers/fpga/fpga-region.c +++ b/drivers/fpga/fpga-region.c @@ -16,15 +16,15 @@ #include <linux/spinlock.h> static DEFINE_IDA(fpga_region_ida); -static struct class *fpga_region_class; +static const struct class fpga_region_class; -struct fpga_region *fpga_region_class_find( - struct device *start, const void *data, - int (*match)(struct device *, const void *)) +struct fpga_region * +fpga_region_class_find(struct device *start, const void *data, + int (*match)(struct device *, const void *)) { struct device *dev; - dev = 
class_find_device(fpga_region_class, start, data, match); + dev = class_find_device(&fpga_region_class, start, data, match); if (!dev) return NULL; @@ -38,9 +38,10 @@ EXPORT_SYMBOL_GPL(fpga_region_class_find); * * Caller should call fpga_region_put() when done with region. * - * Return fpga_region struct if successful. - * Return -EBUSY if someone already has a reference to the region. - * Return -ENODEV if @np is not an FPGA Region. + * Return: + * * fpga_region struct if successful. + * * -EBUSY if someone already has a reference to the region. + * * -ENODEV if can't take parent driver module refcount. */ static struct fpga_region *fpga_region_get(struct fpga_region *region) { @@ -52,7 +53,7 @@ static struct fpga_region *fpga_region_get(struct fpga_region *region) } get_device(dev); - if (!try_module_get(dev->parent->driver->owner)) { + if (!try_module_get(region->ops_owner)) { put_device(dev); mutex_unlock(®ion->mutex); return ERR_PTR(-ENODEV); @@ -74,7 +75,7 @@ static void fpga_region_put(struct fpga_region *region) dev_dbg(dev, "put\n"); - module_put(dev->parent->driver->owner); + module_put(region->ops_owner); put_device(dev); mutex_unlock(®ion->mutex); } @@ -91,7 +92,7 @@ static void fpga_region_put(struct fpga_region *region) * The caller will need to call fpga_bridges_put() before attempting to * reprogram the region. * - * Return 0 for success or negative error code. + * Return: 0 for success or negative error code. */ int fpga_region_program_fpga(struct fpga_region *region) { @@ -180,40 +181,46 @@ static struct attribute *fpga_region_attrs[] = { ATTRIBUTE_GROUPS(fpga_region); /** - * fpga_region_create - alloc and init a struct fpga_region + * __fpga_region_register_full - create and register an FPGA Region device * @parent: device parent - * @mgr: manager that programs this region - * @get_bridges: optional function to get bridges to a list - * - * The caller of this function is responsible for freeing the resulting region - * struct with fpga_region_free(). Using devm_fpga_region_create() instead is - * recommended. 
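A region consumer normally attaches an fpga_image_info to the region and lets fpga_region_program_fpga() handle manager locking and bridge gating; per the comment above, fpga_bridges_put() has to be called before the region is reprogrammed. A rough sketch, with the function name a placeholder and the info setup mirroring the manager example earlier:

static int my_region_reprogram(struct fpga_region *region,
                               struct fpga_image_info *info)
{
        /* Drop bridges collected by a previous programming cycle, if any */
        fpga_bridges_put(&region->bridge_list);

        region->info = info;

        /*
         * Gets the region, locks the manager, collects and disables the
         * bridges via region->get_bridges(), writes the image and
         * re-enables the bridges on success.
         */
        return fpga_region_program_fpga(region);
}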
+ * @info: parameters for FPGA Region + * @owner: module containing the get_bridges function * - * Return: struct fpga_region or NULL + * Return: struct fpga_region or ERR_PTR() */ -struct fpga_region -*fpga_region_create(struct device *parent, - struct fpga_manager *mgr, - int (*get_bridges)(struct fpga_region *)) +struct fpga_region * +__fpga_region_register_full(struct device *parent, const struct fpga_region_info *info, + struct module *owner) { struct fpga_region *region; int id, ret = 0; + if (!info) { + dev_err(parent, + "Attempt to register without required info structure\n"); + return ERR_PTR(-EINVAL); + } + region = kzalloc(sizeof(*region), GFP_KERNEL); if (!region) - return NULL; + return ERR_PTR(-ENOMEM); - id = ida_simple_get(&fpga_region_ida, 0, 0, GFP_KERNEL); - if (id < 0) + id = ida_alloc(&fpga_region_ida, GFP_KERNEL); + if (id < 0) { + ret = id; goto err_free; + } + + region->mgr = info->mgr; + region->compat_id = info->compat_id; + region->priv = info->priv; + region->get_bridges = info->get_bridges; + region->ops_owner = owner; - region->mgr = mgr; - region->get_bridges = get_bridges; mutex_init(®ion->mutex); INIT_LIST_HEAD(®ion->bridge_list); - device_initialize(®ion->dev); - region->dev.class = fpga_region_class; + region->dev.class = &fpga_region_class; region->dev.parent = parent; region->dev.of_node = parent->of_node; region->dev.id = id; @@ -222,85 +229,48 @@ struct fpga_region if (ret) goto err_remove; + ret = device_register(®ion->dev); + if (ret) { + put_device(®ion->dev); + return ERR_PTR(ret); + } + return region; err_remove: - ida_simple_remove(&fpga_region_ida, id); + ida_free(&fpga_region_ida, id); err_free: kfree(region); - return NULL; -} -EXPORT_SYMBOL_GPL(fpga_region_create); - -/** - * fpga_region_free - free an FPGA region created by fpga_region_create() - * @region: FPGA region - */ -void fpga_region_free(struct fpga_region *region) -{ - ida_simple_remove(&fpga_region_ida, region->dev.id); - kfree(region); -} -EXPORT_SYMBOL_GPL(fpga_region_free); - -static void devm_fpga_region_release(struct device *dev, void *res) -{ - struct fpga_region *region = *(struct fpga_region **)res; - - fpga_region_free(region); + return ERR_PTR(ret); } +EXPORT_SYMBOL_GPL(__fpga_region_register_full); /** - * devm_fpga_region_create - create and initialize a managed FPGA region struct + * __fpga_region_register - create and register an FPGA Region device * @parent: device parent * @mgr: manager that programs this region * @get_bridges: optional function to get bridges to a list + * @owner: module containing the get_bridges function * - * This function is intended for use in an FPGA region driver's probe function. - * After the region driver creates the region struct with - * devm_fpga_region_create(), it should register it with fpga_region_register(). - * The region driver's remove function should call fpga_region_unregister(). - * The region struct allocated with this function will be freed automatically on - * driver detach. This includes the case of a probe function returning error - * before calling fpga_region_register(), the struct will still get cleaned up. + * This simple version of the register function should be sufficient for most users. + * The fpga_region_register_full() function is available for users that need to + * pass additional, optional parameters. 
* - * Return: struct fpga_region or NULL + * Return: struct fpga_region or ERR_PTR() */ -struct fpga_region -*devm_fpga_region_create(struct device *parent, - struct fpga_manager *mgr, - int (*get_bridges)(struct fpga_region *)) +struct fpga_region * +__fpga_region_register(struct device *parent, struct fpga_manager *mgr, + int (*get_bridges)(struct fpga_region *), struct module *owner) { - struct fpga_region **ptr, *region; - - ptr = devres_alloc(devm_fpga_region_release, sizeof(*ptr), GFP_KERNEL); - if (!ptr) - return NULL; + struct fpga_region_info info = { 0 }; - region = fpga_region_create(parent, mgr, get_bridges); - if (!region) { - devres_free(ptr); - } else { - *ptr = region; - devres_add(parent, ptr); - } + info.mgr = mgr; + info.get_bridges = get_bridges; - return region; + return __fpga_region_register_full(parent, &info, owner); } -EXPORT_SYMBOL_GPL(devm_fpga_region_create); - -/** - * fpga_region_register - register an FPGA region - * @region: FPGA region - * - * Return: 0 or -errno - */ -int fpga_region_register(struct fpga_region *region) -{ - return device_add(®ion->dev); -} -EXPORT_SYMBOL_GPL(fpga_region_register); +EXPORT_SYMBOL_GPL(__fpga_region_register); /** * fpga_region_unregister - unregister an FPGA region @@ -316,27 +286,31 @@ EXPORT_SYMBOL_GPL(fpga_region_unregister); static void fpga_region_dev_release(struct device *dev) { + struct fpga_region *region = to_fpga_region(dev); + + ida_free(&fpga_region_ida, region->dev.id); + kfree(region); } +static const struct class fpga_region_class = { + .name = "fpga_region", + .dev_groups = fpga_region_groups, + .dev_release = fpga_region_dev_release, +}; + /** - * fpga_region_init - init function for fpga_region class - * Creates the fpga_region class and registers a reconfig notifier. + * fpga_region_init - creates the fpga_region class. + * + * Return: 0 on success or ERR_PTR() on error. 
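Registering a region follows the same pattern as the bridge and manager reworks: one call that returns an ERR_PTR() on failure, with the full-info variant reserved for optional parameters. A sketch of the simple case; my_get_bridges() is a placeholder callback and the manager lookup is illustrative only:

static int my_region_probe(struct platform_device *pdev)
{
        struct fpga_region *region;
        struct fpga_manager *mgr;

        mgr = fpga_mgr_get(pdev->dev.parent);   /* manager registered on the parent */
        if (IS_ERR(mgr))
                return PTR_ERR(mgr);

        region = fpga_region_register(&pdev->dev, mgr, my_get_bridges);
        if (IS_ERR(region)) {
                fpga_mgr_put(mgr);
                return PTR_ERR(region);
        }

        platform_set_drvdata(pdev, region);
        return 0;
}

static void my_region_remove(struct platform_device *pdev)
{
        struct fpga_region *region = platform_get_drvdata(pdev);
        struct fpga_manager *mgr = region->mgr;

        fpga_region_unregister(region);
        fpga_mgr_put(mgr);
}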
*/ static int __init fpga_region_init(void) { - fpga_region_class = class_create(THIS_MODULE, "fpga_region"); - if (IS_ERR(fpga_region_class)) - return PTR_ERR(fpga_region_class); - - fpga_region_class->dev_groups = fpga_region_groups; - fpga_region_class->dev_release = fpga_region_dev_release; - - return 0; + return class_register(&fpga_region_class); } static void __exit fpga_region_exit(void) { - class_destroy(fpga_region_class); + class_unregister(&fpga_region_class); ida_destroy(&fpga_region_ida); } diff --git a/drivers/fpga/ice40-spi.c b/drivers/fpga/ice40-spi.c index 69dec5af23c3..62c30266130d 100644 --- a/drivers/fpga/ice40-spi.c +++ b/drivers/fpga/ice40-spi.c @@ -10,8 +10,8 @@ #include <linux/fpga/fpga-mgr.h> #include <linux/gpio/consumer.h> +#include <linux/mod_devicetable.h> #include <linux/module.h> -#include <linux/of_gpio.h> #include <linux/spi/spi.h> #include <linux/stringify.h> @@ -66,7 +66,7 @@ static int ice40_fpga_ops_write_init(struct fpga_manager *mgr, } /* Lock the bus, assert CRESET_B and SS_B and delay >200ns */ - spi_bus_lock(dev->master); + spi_bus_lock(dev->controller); gpiod_set_value(priv->reset, 1); @@ -94,7 +94,7 @@ static int ice40_fpga_ops_write_init(struct fpga_manager *mgr, ret = spi_sync_locked(dev, &message); fail: - spi_bus_unlock(dev->master); + spi_bus_unlock(dev->controller); return ret; } @@ -178,12 +178,9 @@ static int ice40_fpga_probe(struct spi_device *spi) return ret; } - mgr = devm_fpga_mgr_create(dev, "Lattice iCE40 FPGA Manager", - &ice40_fpga_ops, priv); - if (!mgr) - return -ENOMEM; - - return devm_fpga_mgr_register(dev, mgr); + mgr = devm_fpga_mgr_register(dev, "Lattice iCE40 FPGA Manager", + &ice40_fpga_ops, priv); + return PTR_ERR_OR_ZERO(mgr); } static const struct of_device_id ice40_fpga_of_match[] = { @@ -192,12 +189,19 @@ static const struct of_device_id ice40_fpga_of_match[] = { }; MODULE_DEVICE_TABLE(of, ice40_fpga_of_match); +static const struct spi_device_id ice40_fpga_spi_ids[] = { + { .name = "ice40-fpga-mgr", }, + {}, +}; +MODULE_DEVICE_TABLE(spi, ice40_fpga_spi_ids); + static struct spi_driver ice40_fpga_driver = { .probe = ice40_fpga_probe, .driver = { .name = "ice40spi", - .of_match_table = of_match_ptr(ice40_fpga_of_match), + .of_match_table = ice40_fpga_of_match, }, + .id_table = ice40_fpga_spi_ids, }; module_spi_driver(ice40_fpga_driver); diff --git a/drivers/fpga/intel-m10-bmc-sec-update.c b/drivers/fpga/intel-m10-bmc-sec-update.c new file mode 100644 index 000000000000..10f678b9ed36 --- /dev/null +++ b/drivers/fpga/intel-m10-bmc-sec-update.c @@ -0,0 +1,774 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Intel MAX10 Board Management Controller Secure Update Driver + * + * Copyright (C) 2019-2022 Intel Corporation. All rights reserved. 
+ * + */ +#include <linux/bitfield.h> +#include <linux/device.h> +#include <linux/firmware.h> +#include <linux/mfd/intel-m10-bmc.h> +#include <linux/mod_devicetable.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/slab.h> + +struct m10bmc_sec; + +struct m10bmc_sec_ops { + int (*rsu_status)(struct m10bmc_sec *sec); +}; + +struct m10bmc_sec { + struct device *dev; + struct intel_m10bmc *m10bmc; + struct fw_upload *fwl; + char *fw_name; + u32 fw_name_id; + bool cancel_request; + const struct m10bmc_sec_ops *ops; +}; + +static DEFINE_XARRAY_ALLOC(fw_upload_xa); + +/* Root Entry Hash (REH) support */ +#define REH_SHA256_SIZE 32 +#define REH_SHA384_SIZE 48 +#define REH_MAGIC GENMASK(15, 0) +#define REH_SHA_NUM_BYTES GENMASK(31, 16) + +static int m10bmc_sec_write(struct m10bmc_sec *sec, const u8 *buf, u32 offset, u32 size) +{ + struct intel_m10bmc *m10bmc = sec->m10bmc; + unsigned int stride = regmap_get_reg_stride(m10bmc->regmap); + u32 write_count = size / stride; + u32 leftover_offset = write_count * stride; + u32 leftover_size = size - leftover_offset; + u32 leftover_tmp = 0; + int ret; + + if (sec->m10bmc->flash_bulk_ops) + return sec->m10bmc->flash_bulk_ops->write(m10bmc, buf, offset, size); + + if (WARN_ON_ONCE(stride > sizeof(leftover_tmp))) + return -EINVAL; + + ret = regmap_bulk_write(m10bmc->regmap, M10BMC_STAGING_BASE + offset, + buf + offset, write_count); + if (ret) + return ret; + + /* If size is not aligned to stride, handle the remainder bytes with regmap_write() */ + if (leftover_size) { + memcpy(&leftover_tmp, buf + leftover_offset, leftover_size); + ret = regmap_write(m10bmc->regmap, M10BMC_STAGING_BASE + offset + leftover_offset, + leftover_tmp); + if (ret) + return ret; + } + + return 0; +} + +static int m10bmc_sec_read(struct m10bmc_sec *sec, u8 *buf, u32 addr, u32 size) +{ + struct intel_m10bmc *m10bmc = sec->m10bmc; + unsigned int stride = regmap_get_reg_stride(m10bmc->regmap); + u32 read_count = size / stride; + u32 leftover_offset = read_count * stride; + u32 leftover_size = size - leftover_offset; + u32 leftover_tmp; + int ret; + + if (sec->m10bmc->flash_bulk_ops) + return sec->m10bmc->flash_bulk_ops->read(m10bmc, buf, addr, size); + + if (WARN_ON_ONCE(stride > sizeof(leftover_tmp))) + return -EINVAL; + + ret = regmap_bulk_read(m10bmc->regmap, addr, buf, read_count); + if (ret) + return ret; + + /* If size is not aligned to stride, handle the remainder bytes with regmap_read() */ + if (leftover_size) { + ret = regmap_read(m10bmc->regmap, addr + leftover_offset, &leftover_tmp); + if (ret) + return ret; + memcpy(buf + leftover_offset, &leftover_tmp, leftover_size); + } + + return 0; +} + + +static ssize_t +show_root_entry_hash(struct device *dev, u32 exp_magic, + u32 prog_addr, u32 reh_addr, char *buf) +{ + struct m10bmc_sec *sec = dev_get_drvdata(dev); + int sha_num_bytes, i, ret, cnt = 0; + u8 hash[REH_SHA384_SIZE]; + u32 magic; + + ret = m10bmc_sec_read(sec, (u8 *)&magic, prog_addr, sizeof(magic)); + if (ret) + return ret; + + if (FIELD_GET(REH_MAGIC, magic) != exp_magic) + return sysfs_emit(buf, "hash not programmed\n"); + + sha_num_bytes = FIELD_GET(REH_SHA_NUM_BYTES, magic) / 8; + if (sha_num_bytes != REH_SHA256_SIZE && + sha_num_bytes != REH_SHA384_SIZE) { + dev_err(sec->dev, "%s bad sha num bytes %d\n", __func__, + sha_num_bytes); + return -EINVAL; + } + + ret = m10bmc_sec_read(sec, hash, reh_addr, sha_num_bytes); + if (ret) { + dev_err(dev, "failed to read root entry hash\n"); + return ret; + } + + for (i = 0; i < sha_num_bytes; 
i++) + cnt += sprintf(buf + cnt, "%02x", hash[i]); + cnt += sprintf(buf + cnt, "\n"); + + return cnt; +} + +#define DEVICE_ATTR_SEC_REH_RO(_name) \ +static ssize_t _name##_root_entry_hash_show(struct device *dev, \ + struct device_attribute *attr, \ + char *buf) \ +{ \ + struct m10bmc_sec *sec = dev_get_drvdata(dev); \ + const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map; \ + \ + return show_root_entry_hash(dev, csr_map->_name##_magic, \ + csr_map->_name##_prog_addr, \ + csr_map->_name##_reh_addr, \ + buf); \ +} \ +static DEVICE_ATTR_RO(_name##_root_entry_hash) + +DEVICE_ATTR_SEC_REH_RO(bmc); +DEVICE_ATTR_SEC_REH_RO(sr); +DEVICE_ATTR_SEC_REH_RO(pr); + +#define CSK_BIT_LEN 128U +#define CSK_32ARRAY_SIZE DIV_ROUND_UP(CSK_BIT_LEN, 32) + +static ssize_t +show_canceled_csk(struct device *dev, u32 addr, char *buf) +{ + unsigned int i, size = CSK_32ARRAY_SIZE * sizeof(u32); + struct m10bmc_sec *sec = dev_get_drvdata(dev); + DECLARE_BITMAP(csk_map, CSK_BIT_LEN); + __le32 csk_le32[CSK_32ARRAY_SIZE]; + u32 csk32[CSK_32ARRAY_SIZE]; + int ret; + + ret = m10bmc_sec_read(sec, (u8 *)&csk_le32, addr, size); + if (ret) { + dev_err(sec->dev, "failed to read CSK vector\n"); + return ret; + } + + for (i = 0; i < CSK_32ARRAY_SIZE; i++) + csk32[i] = le32_to_cpu(((csk_le32[i]))); + + bitmap_from_arr32(csk_map, csk32, CSK_BIT_LEN); + bitmap_complement(csk_map, csk_map, CSK_BIT_LEN); + return bitmap_print_to_pagebuf(1, buf, csk_map, CSK_BIT_LEN); +} + +#define DEVICE_ATTR_SEC_CSK_RO(_name) \ +static ssize_t _name##_canceled_csks_show(struct device *dev, \ + struct device_attribute *attr, \ + char *buf) \ +{ \ + struct m10bmc_sec *sec = dev_get_drvdata(dev); \ + const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map; \ + \ + return show_canceled_csk(dev, \ + csr_map->_name##_prog_addr + CSK_VEC_OFFSET, \ + buf); \ +} \ +static DEVICE_ATTR_RO(_name##_canceled_csks) + +#define CSK_VEC_OFFSET 0x34 + +DEVICE_ATTR_SEC_CSK_RO(bmc); +DEVICE_ATTR_SEC_CSK_RO(sr); +DEVICE_ATTR_SEC_CSK_RO(pr); + +#define FLASH_COUNT_SIZE 4096 /* count stored as inverted bit vector */ + +static ssize_t flash_count_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct m10bmc_sec *sec = dev_get_drvdata(dev); + const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map; + unsigned int num_bits; + u8 *flash_buf; + int cnt, ret; + + num_bits = FLASH_COUNT_SIZE * 8; + + flash_buf = kmalloc(FLASH_COUNT_SIZE, GFP_KERNEL); + if (!flash_buf) + return -ENOMEM; + + ret = m10bmc_sec_read(sec, flash_buf, csr_map->rsu_update_counter, + FLASH_COUNT_SIZE); + if (ret) { + dev_err(sec->dev, "failed to read flash count\n"); + goto exit_free; + } + cnt = num_bits - bitmap_weight((unsigned long *)flash_buf, num_bits); + +exit_free: + kfree(flash_buf); + + return ret ? 
: sysfs_emit(buf, "%u\n", cnt); +} +static DEVICE_ATTR_RO(flash_count); + +static struct attribute *m10bmc_security_attrs[] = { + &dev_attr_flash_count.attr, + &dev_attr_bmc_root_entry_hash.attr, + &dev_attr_sr_root_entry_hash.attr, + &dev_attr_pr_root_entry_hash.attr, + &dev_attr_sr_canceled_csks.attr, + &dev_attr_pr_canceled_csks.attr, + &dev_attr_bmc_canceled_csks.attr, + NULL, +}; + +static struct attribute_group m10bmc_security_attr_group = { + .name = "security", + .attrs = m10bmc_security_attrs, +}; + +static const struct attribute_group *m10bmc_sec_attr_groups[] = { + &m10bmc_security_attr_group, + NULL, +}; + +static void log_error_regs(struct m10bmc_sec *sec, u32 doorbell) +{ + const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map; + u32 auth_result; + + dev_err(sec->dev, "Doorbell: 0x%08x\n", doorbell); + + if (!m10bmc_sys_read(sec->m10bmc, csr_map->auth_result, &auth_result)) + dev_err(sec->dev, "RSU auth result: 0x%08x\n", auth_result); +} + +static int m10bmc_sec_n3000_rsu_status(struct m10bmc_sec *sec) +{ + const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map; + u32 doorbell; + int ret; + + ret = m10bmc_sys_read(sec->m10bmc, csr_map->doorbell, &doorbell); + if (ret) + return ret; + + return FIELD_GET(DRBL_RSU_STATUS, doorbell); +} + +static int m10bmc_sec_n6000_rsu_status(struct m10bmc_sec *sec) +{ + const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map; + u32 auth_result; + int ret; + + ret = m10bmc_sys_read(sec->m10bmc, csr_map->auth_result, &auth_result); + if (ret) + return ret; + + return FIELD_GET(AUTH_RESULT_RSU_STATUS, auth_result); +} + +static bool rsu_status_ok(u32 status) +{ + return (status == RSU_STAT_NORMAL || + status == RSU_STAT_NIOS_OK || + status == RSU_STAT_USER_OK || + status == RSU_STAT_FACTORY_OK); +} + +static bool rsu_progress_done(u32 progress) +{ + return (progress == RSU_PROG_IDLE || + progress == RSU_PROG_RSU_DONE); +} + +static bool rsu_progress_busy(u32 progress) +{ + return (progress == RSU_PROG_AUTHENTICATING || + progress == RSU_PROG_COPYING || + progress == RSU_PROG_UPDATE_CANCEL || + progress == RSU_PROG_PROGRAM_KEY_HASH); +} + +static int m10bmc_sec_progress_status(struct m10bmc_sec *sec, u32 *doorbell_reg, + u32 *progress, u32 *status) +{ + const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map; + int ret; + + ret = m10bmc_sys_read(sec->m10bmc, csr_map->doorbell, doorbell_reg); + if (ret) + return ret; + + ret = sec->ops->rsu_status(sec); + if (ret < 0) + return ret; + + *status = ret; + *progress = rsu_prog(*doorbell_reg); + + return 0; +} + +static enum fw_upload_err rsu_check_idle(struct m10bmc_sec *sec) +{ + const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map; + u32 doorbell; + int ret; + + ret = m10bmc_sys_read(sec->m10bmc, csr_map->doorbell, &doorbell); + if (ret) + return FW_UPLOAD_ERR_RW_ERROR; + + if (!rsu_progress_done(rsu_prog(doorbell))) { + log_error_regs(sec, doorbell); + return FW_UPLOAD_ERR_BUSY; + } + + return FW_UPLOAD_ERR_NONE; +} + +static inline bool rsu_start_done(u32 doorbell_reg, u32 progress, u32 status) +{ + if (doorbell_reg & DRBL_RSU_REQUEST) + return false; + + if (status == RSU_STAT_ERASE_FAIL || status == RSU_STAT_WEAROUT) + return true; + + if (!rsu_progress_done(progress)) + return true; + + return false; +} + +static enum fw_upload_err rsu_update_init(struct m10bmc_sec *sec) +{ + const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map; + u32 doorbell_reg, progress, status; + int ret, err; + + ret = m10bmc_sys_update_bits(sec->m10bmc, 
csr_map->doorbell, + DRBL_RSU_REQUEST | DRBL_HOST_STATUS, + DRBL_RSU_REQUEST | + FIELD_PREP(DRBL_HOST_STATUS, + HOST_STATUS_IDLE)); + if (ret) + return FW_UPLOAD_ERR_RW_ERROR; + + ret = read_poll_timeout(m10bmc_sec_progress_status, err, + err < 0 || rsu_start_done(doorbell_reg, progress, status), + NIOS_HANDSHAKE_INTERVAL_US, + NIOS_HANDSHAKE_TIMEOUT_US, + false, + sec, &doorbell_reg, &progress, &status); + + if (ret == -ETIMEDOUT) { + log_error_regs(sec, doorbell_reg); + return FW_UPLOAD_ERR_TIMEOUT; + } else if (err) { + return FW_UPLOAD_ERR_RW_ERROR; + } + + if (status == RSU_STAT_WEAROUT) { + dev_warn(sec->dev, "Excessive flash update count detected\n"); + return FW_UPLOAD_ERR_WEAROUT; + } else if (status == RSU_STAT_ERASE_FAIL) { + log_error_regs(sec, doorbell_reg); + return FW_UPLOAD_ERR_HW_ERROR; + } + + return FW_UPLOAD_ERR_NONE; +} + +static enum fw_upload_err rsu_prog_ready(struct m10bmc_sec *sec) +{ + const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map; + unsigned long poll_timeout; + u32 doorbell, progress; + int ret; + + ret = m10bmc_sys_read(sec->m10bmc, csr_map->doorbell, &doorbell); + if (ret) + return FW_UPLOAD_ERR_RW_ERROR; + + poll_timeout = jiffies + msecs_to_jiffies(RSU_PREP_TIMEOUT_MS); + while (rsu_prog(doorbell) == RSU_PROG_PREPARE) { + msleep(RSU_PREP_INTERVAL_MS); + if (time_after(jiffies, poll_timeout)) + break; + + ret = m10bmc_sys_read(sec->m10bmc, csr_map->doorbell, &doorbell); + if (ret) + return FW_UPLOAD_ERR_RW_ERROR; + } + + progress = rsu_prog(doorbell); + if (progress == RSU_PROG_PREPARE) { + log_error_regs(sec, doorbell); + return FW_UPLOAD_ERR_TIMEOUT; + } else if (progress != RSU_PROG_READY) { + log_error_regs(sec, doorbell); + return FW_UPLOAD_ERR_HW_ERROR; + } + + return FW_UPLOAD_ERR_NONE; +} + +static enum fw_upload_err rsu_send_data(struct m10bmc_sec *sec) +{ + const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map; + u32 doorbell_reg, status; + int ret; + + ret = m10bmc_sys_update_bits(sec->m10bmc, csr_map->doorbell, + DRBL_HOST_STATUS, + FIELD_PREP(DRBL_HOST_STATUS, + HOST_STATUS_WRITE_DONE)); + if (ret) + return FW_UPLOAD_ERR_RW_ERROR; + + ret = regmap_read_poll_timeout(sec->m10bmc->regmap, + csr_map->base + csr_map->doorbell, + doorbell_reg, + rsu_prog(doorbell_reg) != RSU_PROG_READY, + NIOS_HANDSHAKE_INTERVAL_US, + NIOS_HANDSHAKE_TIMEOUT_US); + + if (ret == -ETIMEDOUT) { + log_error_regs(sec, doorbell_reg); + return FW_UPLOAD_ERR_TIMEOUT; + } else if (ret) { + return FW_UPLOAD_ERR_RW_ERROR; + } + + ret = sec->ops->rsu_status(sec); + if (ret < 0) + return FW_UPLOAD_ERR_HW_ERROR; + status = ret; + + if (!rsu_status_ok(status)) { + log_error_regs(sec, doorbell_reg); + return FW_UPLOAD_ERR_HW_ERROR; + } + + return FW_UPLOAD_ERR_NONE; +} + +static int rsu_check_complete(struct m10bmc_sec *sec, u32 *doorbell_reg) +{ + u32 progress, status; + + if (m10bmc_sec_progress_status(sec, doorbell_reg, &progress, &status)) + return -EIO; + + if (!rsu_status_ok(status)) + return -EINVAL; + + if (rsu_progress_done(progress)) + return 0; + + if (rsu_progress_busy(progress)) + return -EAGAIN; + + return -EINVAL; +} + +static enum fw_upload_err rsu_cancel(struct m10bmc_sec *sec) +{ + const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map; + u32 doorbell; + int ret; + + ret = m10bmc_sys_read(sec->m10bmc, csr_map->doorbell, &doorbell); + if (ret) + return FW_UPLOAD_ERR_RW_ERROR; + + if (rsu_prog(doorbell) != RSU_PROG_READY) + return FW_UPLOAD_ERR_BUSY; + + ret = m10bmc_sys_update_bits(sec->m10bmc, csr_map->doorbell, + 
DRBL_HOST_STATUS, + FIELD_PREP(DRBL_HOST_STATUS, + HOST_STATUS_ABORT_RSU)); + if (ret) + return FW_UPLOAD_ERR_RW_ERROR; + + return FW_UPLOAD_ERR_CANCELED; +} + +static enum fw_upload_err m10bmc_sec_prepare(struct fw_upload *fwl, + const u8 *data, u32 size) +{ + struct m10bmc_sec *sec = fwl->dd_handle; + const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map; + u32 ret; + + sec->cancel_request = false; + + if (!size || size > csr_map->staging_size) + return FW_UPLOAD_ERR_INVALID_SIZE; + + if (sec->m10bmc->flash_bulk_ops) + if (sec->m10bmc->flash_bulk_ops->lock_write(sec->m10bmc)) + return FW_UPLOAD_ERR_BUSY; + + ret = rsu_check_idle(sec); + if (ret != FW_UPLOAD_ERR_NONE) + goto unlock_flash; + + m10bmc_fw_state_set(sec->m10bmc, M10BMC_FW_STATE_SEC_UPDATE_PREPARE); + + ret = rsu_update_init(sec); + if (ret != FW_UPLOAD_ERR_NONE) + goto fw_state_exit; + + ret = rsu_prog_ready(sec); + if (ret != FW_UPLOAD_ERR_NONE) + goto fw_state_exit; + + if (sec->cancel_request) { + ret = rsu_cancel(sec); + goto fw_state_exit; + } + + m10bmc_fw_state_set(sec->m10bmc, M10BMC_FW_STATE_SEC_UPDATE_WRITE); + + return FW_UPLOAD_ERR_NONE; + +fw_state_exit: + m10bmc_fw_state_set(sec->m10bmc, M10BMC_FW_STATE_NORMAL); + +unlock_flash: + if (sec->m10bmc->flash_bulk_ops) + sec->m10bmc->flash_bulk_ops->unlock_write(sec->m10bmc); + return ret; +} + +#define WRITE_BLOCK_SIZE 0x4000 /* Default write-block size is 0x4000 bytes */ + +static enum fw_upload_err m10bmc_sec_fw_write(struct fw_upload *fwl, const u8 *data, + u32 offset, u32 size, u32 *written) +{ + struct m10bmc_sec *sec = fwl->dd_handle; + const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map; + struct intel_m10bmc *m10bmc = sec->m10bmc; + u32 blk_size, doorbell; + int ret; + + if (sec->cancel_request) + return rsu_cancel(sec); + + ret = m10bmc_sys_read(m10bmc, csr_map->doorbell, &doorbell); + if (ret) { + return FW_UPLOAD_ERR_RW_ERROR; + } else if (rsu_prog(doorbell) != RSU_PROG_READY) { + log_error_regs(sec, doorbell); + return FW_UPLOAD_ERR_HW_ERROR; + } + + WARN_ON_ONCE(WRITE_BLOCK_SIZE % regmap_get_reg_stride(m10bmc->regmap)); + blk_size = min_t(u32, WRITE_BLOCK_SIZE, size); + ret = m10bmc_sec_write(sec, data, offset, blk_size); + if (ret) + return FW_UPLOAD_ERR_RW_ERROR; + + *written = blk_size; + return FW_UPLOAD_ERR_NONE; +} + +static enum fw_upload_err m10bmc_sec_poll_complete(struct fw_upload *fwl) +{ + struct m10bmc_sec *sec = fwl->dd_handle; + unsigned long poll_timeout; + u32 doorbell, result; + int ret; + + if (sec->cancel_request) + return rsu_cancel(sec); + + m10bmc_fw_state_set(sec->m10bmc, M10BMC_FW_STATE_SEC_UPDATE_PROGRAM); + + result = rsu_send_data(sec); + if (result != FW_UPLOAD_ERR_NONE) + return result; + + poll_timeout = jiffies + msecs_to_jiffies(RSU_COMPLETE_TIMEOUT_MS); + do { + msleep(RSU_COMPLETE_INTERVAL_MS); + ret = rsu_check_complete(sec, &doorbell); + } while (ret == -EAGAIN && !time_after(jiffies, poll_timeout)); + + if (ret == -EAGAIN) { + log_error_regs(sec, doorbell); + return FW_UPLOAD_ERR_TIMEOUT; + } else if (ret == -EIO) { + return FW_UPLOAD_ERR_RW_ERROR; + } else if (ret) { + log_error_regs(sec, doorbell); + return FW_UPLOAD_ERR_HW_ERROR; + } + + return FW_UPLOAD_ERR_NONE; +} + +/* + * m10bmc_sec_cancel() may be called asynchronously with an on-going update. + * All other functions are called sequentially in a single thread. To avoid + * contention on register accesses, m10bmc_sec_cancel() must only update + * the cancel_request flag. 
Other functions will check this flag and handle + * the cancel request synchronously. + */ +static void m10bmc_sec_cancel(struct fw_upload *fwl) +{ + struct m10bmc_sec *sec = fwl->dd_handle; + + sec->cancel_request = true; +} + +static void m10bmc_sec_cleanup(struct fw_upload *fwl) +{ + struct m10bmc_sec *sec = fwl->dd_handle; + + (void)rsu_cancel(sec); + + m10bmc_fw_state_set(sec->m10bmc, M10BMC_FW_STATE_NORMAL); + + if (sec->m10bmc->flash_bulk_ops) + sec->m10bmc->flash_bulk_ops->unlock_write(sec->m10bmc); +} + +static const struct fw_upload_ops m10bmc_ops = { + .prepare = m10bmc_sec_prepare, + .write = m10bmc_sec_fw_write, + .poll_complete = m10bmc_sec_poll_complete, + .cancel = m10bmc_sec_cancel, + .cleanup = m10bmc_sec_cleanup, +}; + +static const struct m10bmc_sec_ops m10sec_n3000_ops = { + .rsu_status = m10bmc_sec_n3000_rsu_status, +}; + +static const struct m10bmc_sec_ops m10sec_n6000_ops = { + .rsu_status = m10bmc_sec_n6000_rsu_status, +}; + +#define SEC_UPDATE_LEN_MAX 32 +static int m10bmc_sec_probe(struct platform_device *pdev) +{ + char buf[SEC_UPDATE_LEN_MAX]; + struct m10bmc_sec *sec; + struct fw_upload *fwl; + unsigned int len; + int ret; + + sec = devm_kzalloc(&pdev->dev, sizeof(*sec), GFP_KERNEL); + if (!sec) + return -ENOMEM; + + sec->dev = &pdev->dev; + sec->m10bmc = dev_get_drvdata(pdev->dev.parent); + sec->ops = (struct m10bmc_sec_ops *)platform_get_device_id(pdev)->driver_data; + dev_set_drvdata(&pdev->dev, sec); + + ret = xa_alloc(&fw_upload_xa, &sec->fw_name_id, sec, + xa_limit_32b, GFP_KERNEL); + if (ret) + return ret; + + len = scnprintf(buf, SEC_UPDATE_LEN_MAX, "secure-update%d", + sec->fw_name_id); + sec->fw_name = kmemdup_nul(buf, len, GFP_KERNEL); + if (!sec->fw_name) { + ret = -ENOMEM; + goto fw_name_fail; + } + + fwl = firmware_upload_register(THIS_MODULE, sec->dev, sec->fw_name, + &m10bmc_ops, sec); + if (IS_ERR(fwl)) { + dev_err(sec->dev, "Firmware Upload driver failed to start\n"); + ret = PTR_ERR(fwl); + goto fw_uploader_fail; + } + + sec->fwl = fwl; + return 0; + +fw_uploader_fail: + kfree(sec->fw_name); +fw_name_fail: + xa_erase(&fw_upload_xa, sec->fw_name_id); + return ret; +} + +static void m10bmc_sec_remove(struct platform_device *pdev) +{ + struct m10bmc_sec *sec = dev_get_drvdata(&pdev->dev); + + firmware_upload_unregister(sec->fwl); + kfree(sec->fw_name); + xa_erase(&fw_upload_xa, sec->fw_name_id); +} + +static const struct platform_device_id intel_m10bmc_sec_ids[] = { + { + .name = "n3000bmc-sec-update", + .driver_data = (kernel_ulong_t)&m10sec_n3000_ops, + }, + { + .name = "d5005bmc-sec-update", + .driver_data = (kernel_ulong_t)&m10sec_n3000_ops, + }, + { + .name = "n6000bmc-sec-update", + .driver_data = (kernel_ulong_t)&m10sec_n6000_ops, + }, + { } +}; +MODULE_DEVICE_TABLE(platform, intel_m10bmc_sec_ids); + +static struct platform_driver intel_m10bmc_sec_driver = { + .probe = m10bmc_sec_probe, + .remove = m10bmc_sec_remove, + .driver = { + .name = "intel-m10bmc-sec-update", + .dev_groups = m10bmc_sec_attr_groups, + }, + .id_table = intel_m10bmc_sec_ids, +}; +module_platform_driver(intel_m10bmc_sec_driver); + +MODULE_AUTHOR("Intel Corporation"); +MODULE_DESCRIPTION("Intel MAX10 BMC Secure Update"); +MODULE_LICENSE("GPL"); +MODULE_IMPORT_NS("INTEL_M10_BMC_CORE"); diff --git a/drivers/fpga/lattice-sysconfig-spi.c b/drivers/fpga/lattice-sysconfig-spi.c new file mode 100644 index 000000000000..44691cfcf50a --- /dev/null +++ b/drivers/fpga/lattice-sysconfig-spi.c @@ -0,0 +1,153 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Lattice FPGA programming 
over slave SPI sysCONFIG interface. + */ + +#include <linux/of.h> +#include <linux/spi/spi.h> + +#include "lattice-sysconfig.h" + +static const u32 ecp5_spi_max_speed_hz = 60000000; + +static int sysconfig_spi_cmd_transfer(struct sysconfig_priv *priv, + const void *tx_buf, size_t tx_len, + void *rx_buf, size_t rx_len) +{ + struct spi_device *spi = to_spi_device(priv->dev); + + return spi_write_then_read(spi, tx_buf, tx_len, rx_buf, rx_len); +} + +static int sysconfig_spi_bitstream_burst_init(struct sysconfig_priv *priv) +{ + const u8 lsc_bitstream_burst[] = SYSCONFIG_LSC_BITSTREAM_BURST; + struct spi_device *spi = to_spi_device(priv->dev); + struct spi_transfer xfer = {}; + struct spi_message msg; + size_t buf_len; + void *buf; + int ret; + + buf_len = sizeof(lsc_bitstream_burst); + + buf = kmemdup(lsc_bitstream_burst, buf_len, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + xfer.len = buf_len; + xfer.tx_buf = buf; + xfer.cs_change = 1; + + spi_message_init_with_transfers(&msg, &xfer, 1); + + /* + * Lock SPI bus for exclusive usage until FPGA programming is done. + * SPI bus will be released in sysconfig_spi_bitstream_burst_complete(). + */ + spi_bus_lock(spi->controller); + + ret = spi_sync_locked(spi, &msg); + if (ret) + spi_bus_unlock(spi->controller); + + kfree(buf); + + return ret; +} + +static int sysconfig_spi_bitstream_burst_write(struct sysconfig_priv *priv, + const char *buf, size_t len) +{ + struct spi_device *spi = to_spi_device(priv->dev); + struct spi_transfer xfer = { + .tx_buf = buf, + .len = len, + .cs_change = 1, + }; + struct spi_message msg; + + spi_message_init_with_transfers(&msg, &xfer, 1); + + return spi_sync_locked(spi, &msg); +} + +static int sysconfig_spi_bitstream_burst_complete(struct sysconfig_priv *priv) +{ + struct spi_device *spi = to_spi_device(priv->dev); + + /* Bitstream burst write is done, release SPI bus */ + spi_bus_unlock(spi->controller); + + /* Toggle CS to finish bitstream write */ + return spi_write(spi, NULL, 0); +} + +static int sysconfig_spi_probe(struct spi_device *spi) +{ + const struct spi_device_id *dev_id; + struct device *dev = &spi->dev; + struct sysconfig_priv *priv; + const u32 *spi_max_speed; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + spi_max_speed = device_get_match_data(dev); + if (!spi_max_speed) { + dev_id = spi_get_device_id(spi); + if (!dev_id) + return -ENODEV; + + spi_max_speed = (const u32 *)dev_id->driver_data; + } + + if (!spi_max_speed) + return -EINVAL; + + if (spi->max_speed_hz > *spi_max_speed) { + dev_err(dev, "SPI speed %u is too high, maximum speed is %u\n", + spi->max_speed_hz, *spi_max_speed); + return -EINVAL; + } + + priv->dev = dev; + priv->command_transfer = sysconfig_spi_cmd_transfer; + priv->bitstream_burst_write_init = sysconfig_spi_bitstream_burst_init; + priv->bitstream_burst_write = sysconfig_spi_bitstream_burst_write; + priv->bitstream_burst_write_complete = sysconfig_spi_bitstream_burst_complete; + + return sysconfig_probe(priv); +} + +static const struct spi_device_id sysconfig_spi_ids[] = { + { + .name = "sysconfig-ecp5", + .driver_data = (kernel_ulong_t)&ecp5_spi_max_speed_hz, + }, {}, +}; +MODULE_DEVICE_TABLE(spi, sysconfig_spi_ids); + +#if IS_ENABLED(CONFIG_OF) +static const struct of_device_id sysconfig_of_ids[] = { + { + .compatible = "lattice,sysconfig-ecp5", + .data = &ecp5_spi_max_speed_hz, + }, {}, +}; +MODULE_DEVICE_TABLE(of, sysconfig_of_ids); +#endif /* IS_ENABLED(CONFIG_OF) */ + +static struct spi_driver lattice_sysconfig_driver = { + 
.probe = sysconfig_spi_probe, + .id_table = sysconfig_spi_ids, + .driver = { + .name = "lattice_sysconfig_spi_fpga_mgr", + .of_match_table = of_match_ptr(sysconfig_of_ids), + }, +}; +module_spi_driver(lattice_sysconfig_driver); + +MODULE_DESCRIPTION("Lattice sysCONFIG Slave SPI FPGA Manager"); +MODULE_LICENSE("GPL"); diff --git a/drivers/fpga/lattice-sysconfig.c b/drivers/fpga/lattice-sysconfig.c new file mode 100644 index 000000000000..ba51a60f672f --- /dev/null +++ b/drivers/fpga/lattice-sysconfig.c @@ -0,0 +1,397 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Lattice FPGA sysCONFIG interface functions independent of port type. + */ + +#include <linux/delay.h> +#include <linux/fpga/fpga-mgr.h> +#include <linux/gpio/consumer.h> +#include <linux/iopoll.h> + +#include "lattice-sysconfig.h" + +static int sysconfig_cmd_write(struct sysconfig_priv *priv, const void *buf, + size_t buf_len) +{ + return priv->command_transfer(priv, buf, buf_len, NULL, 0); +} + +static int sysconfig_cmd_read(struct sysconfig_priv *priv, const void *tx_buf, + size_t tx_len, void *rx_buf, size_t rx_len) +{ + return priv->command_transfer(priv, tx_buf, tx_len, rx_buf, rx_len); +} + +static int sysconfig_read_busy(struct sysconfig_priv *priv) +{ + const u8 lsc_check_busy[] = SYSCONFIG_LSC_CHECK_BUSY; + u8 busy; + int ret; + + ret = sysconfig_cmd_read(priv, lsc_check_busy, sizeof(lsc_check_busy), + &busy, sizeof(busy)); + + return ret ? : busy; +} + +static int sysconfig_poll_busy(struct sysconfig_priv *priv) +{ + int ret, busy; + + ret = read_poll_timeout(sysconfig_read_busy, busy, busy <= 0, + SYSCONFIG_POLL_INTERVAL_US, + SYSCONFIG_POLL_BUSY_TIMEOUT_US, false, priv); + + return ret ? : busy; +} + +static int sysconfig_read_status(struct sysconfig_priv *priv, u32 *status) +{ + const u8 lsc_read_status[] = SYSCONFIG_LSC_READ_STATUS; + __be32 device_status; + int ret; + + ret = sysconfig_cmd_read(priv, lsc_read_status, sizeof(lsc_read_status), + &device_status, sizeof(device_status)); + if (ret) + return ret; + + *status = be32_to_cpu(device_status); + + return 0; +} + +static int sysconfig_poll_status(struct sysconfig_priv *priv, u32 *status) +{ + int ret = sysconfig_poll_busy(priv); + + if (ret) + return ret; + + return sysconfig_read_status(priv, status); +} + +static int sysconfig_poll_gpio(struct gpio_desc *gpio, bool is_active) +{ + int ret, val; + + ret = read_poll_timeout(gpiod_get_value, val, + val < 0 || !!val == is_active, + SYSCONFIG_POLL_INTERVAL_US, + SYSCONFIG_POLL_GPIO_TIMEOUT_US, false, gpio); + + if (val < 0) + return val; + + return ret; +} + +static int sysconfig_gpio_refresh(struct sysconfig_priv *priv) +{ + struct gpio_desc *program = priv->program; + struct gpio_desc *init = priv->init; + struct gpio_desc *done = priv->done; + int ret; + + /* Enter init mode */ + gpiod_set_value(program, 1); + + ret = sysconfig_poll_gpio(init, true); + if (!ret) + ret = sysconfig_poll_gpio(done, false); + + if (ret) + return ret; + + /* Enter program mode */ + gpiod_set_value(program, 0); + + return sysconfig_poll_gpio(init, false); +} + +static int sysconfig_lsc_refresh(struct sysconfig_priv *priv) +{ + static const u8 lsc_refresh[] = SYSCONFIG_LSC_REFRESH; + int ret; + + ret = sysconfig_cmd_write(priv, lsc_refresh, sizeof(lsc_refresh)); + if (ret) + return ret; + + usleep_range(4000, 8000); + + return 0; +} + +static int sysconfig_refresh(struct sysconfig_priv *priv) +{ + struct gpio_desc *program = priv->program; + struct gpio_desc *init = priv->init; + struct gpio_desc *done = priv->done; + + if (program && 
init && done) + return sysconfig_gpio_refresh(priv); + + return sysconfig_lsc_refresh(priv); +} + +static int sysconfig_isc_enable(struct sysconfig_priv *priv) +{ + u8 isc_enable[] = SYSCONFIG_ISC_ENABLE; + u32 status; + int ret; + + ret = sysconfig_cmd_write(priv, isc_enable, sizeof(isc_enable)); + if (ret) + return ret; + + ret = sysconfig_poll_status(priv, &status); + if (ret) + return ret; + + if (status & SYSCONFIG_STATUS_FAIL) + return -EFAULT; + + return 0; +} + +static int sysconfig_isc_erase(struct sysconfig_priv *priv) +{ + u8 isc_erase[] = SYSCONFIG_ISC_ERASE; + u32 status; + int ret; + + ret = sysconfig_cmd_write(priv, isc_erase, sizeof(isc_erase)); + if (ret) + return ret; + + ret = sysconfig_poll_status(priv, &status); + if (ret) + return ret; + + if (status & SYSCONFIG_STATUS_FAIL) + return -EFAULT; + + return 0; +} + +static int sysconfig_isc_init(struct sysconfig_priv *priv) +{ + int ret = sysconfig_isc_enable(priv); + + if (ret) + return ret; + + return sysconfig_isc_erase(priv); +} + +static int sysconfig_lsc_init_addr(struct sysconfig_priv *priv) +{ + const u8 lsc_init_addr[] = SYSCONFIG_LSC_INIT_ADDR; + + return sysconfig_cmd_write(priv, lsc_init_addr, sizeof(lsc_init_addr)); +} + +static int sysconfig_burst_write_init(struct sysconfig_priv *priv) +{ + return priv->bitstream_burst_write_init(priv); +} + +static int sysconfig_burst_write_complete(struct sysconfig_priv *priv) +{ + return priv->bitstream_burst_write_complete(priv); +} + +static int sysconfig_bitstream_burst_write(struct sysconfig_priv *priv, + const char *buf, size_t count) +{ + int ret = priv->bitstream_burst_write(priv, buf, count); + + if (ret) + sysconfig_burst_write_complete(priv); + + return ret; +} + +static int sysconfig_isc_disable(struct sysconfig_priv *priv) +{ + const u8 isc_disable[] = SYSCONFIG_ISC_DISABLE; + + return sysconfig_cmd_write(priv, isc_disable, sizeof(isc_disable)); +} + +static void sysconfig_cleanup(struct sysconfig_priv *priv) +{ + sysconfig_isc_erase(priv); + sysconfig_refresh(priv); +} + +static int sysconfig_isc_finish(struct sysconfig_priv *priv) +{ + struct gpio_desc *done_gpio = priv->done; + u32 status; + int ret; + + if (done_gpio) { + ret = sysconfig_isc_disable(priv); + if (ret) + return ret; + + return sysconfig_poll_gpio(done_gpio, true); + } + + ret = sysconfig_poll_status(priv, &status); + if (ret) + return ret; + + if ((status & SYSCONFIG_STATUS_DONE) && + !(status & SYSCONFIG_STATUS_BUSY) && + !(status & SYSCONFIG_STATUS_ERR)) + return sysconfig_isc_disable(priv); + + return -EFAULT; +} + +static enum fpga_mgr_states sysconfig_ops_state(struct fpga_manager *mgr) +{ + struct sysconfig_priv *priv = mgr->priv; + struct gpio_desc *done = priv->done; + u32 status; + int ret; + + if (done && (gpiod_get_value(done) > 0)) + return FPGA_MGR_STATE_OPERATING; + + ret = sysconfig_read_status(priv, &status); + if (!ret && (status & SYSCONFIG_STATUS_DONE)) + return FPGA_MGR_STATE_OPERATING; + + return FPGA_MGR_STATE_UNKNOWN; +} + +static int sysconfig_ops_write_init(struct fpga_manager *mgr, + struct fpga_image_info *info, + const char *buf, size_t count) +{ + struct sysconfig_priv *priv = mgr->priv; + struct device *dev = &mgr->dev; + int ret; + + if (info->flags & FPGA_MGR_PARTIAL_RECONFIG) { + dev_err(dev, "Partial reconfiguration is not supported\n"); + return -EOPNOTSUPP; + } + + /* Enter program mode */ + ret = sysconfig_refresh(priv); + if (ret) { + dev_err(dev, "Failed to go to program mode\n"); + return ret; + } + + /* Enter ISC mode */ + ret = 
sysconfig_isc_init(priv); + if (ret) { + dev_err(dev, "Failed to go to ISC mode\n"); + return ret; + } + + /* Initialize the Address Shift Register */ + ret = sysconfig_lsc_init_addr(priv); + if (ret) { + dev_err(dev, + "Failed to initialize the Address Shift Register\n"); + return ret; + } + + /* Prepare for bitstream burst write */ + ret = sysconfig_burst_write_init(priv); + if (ret) + dev_err(dev, "Failed to prepare for bitstream burst write\n"); + + return ret; +} + +static int sysconfig_ops_write(struct fpga_manager *mgr, const char *buf, + size_t count) +{ + return sysconfig_bitstream_burst_write(mgr->priv, buf, count); +} + +static int sysconfig_ops_write_complete(struct fpga_manager *mgr, + struct fpga_image_info *info) +{ + struct sysconfig_priv *priv = mgr->priv; + struct device *dev = &mgr->dev; + int ret; + + ret = sysconfig_burst_write_complete(priv); + if (!ret) + ret = sysconfig_poll_busy(priv); + + if (ret) { + dev_err(dev, "Error while waiting bitstream write to finish\n"); + goto fail; + } + + ret = sysconfig_isc_finish(priv); + +fail: + if (ret) + sysconfig_cleanup(priv); + + return ret; +} + +static const struct fpga_manager_ops sysconfig_fpga_mgr_ops = { + .state = sysconfig_ops_state, + .write_init = sysconfig_ops_write_init, + .write = sysconfig_ops_write, + .write_complete = sysconfig_ops_write_complete, +}; + +int sysconfig_probe(struct sysconfig_priv *priv) +{ + struct gpio_desc *program, *init, *done; + struct device *dev = priv->dev; + struct fpga_manager *mgr; + + if (!dev) + return -ENODEV; + + if (!priv->command_transfer || + !priv->bitstream_burst_write_init || + !priv->bitstream_burst_write || + !priv->bitstream_burst_write_complete) { + dev_err(dev, "Essential callback is missing\n"); + return -EINVAL; + } + + program = devm_gpiod_get_optional(dev, "program", GPIOD_OUT_LOW); + if (IS_ERR(program)) + return dev_err_probe(dev, PTR_ERR(program), + "Failed to get PROGRAM GPIO\n"); + + init = devm_gpiod_get_optional(dev, "init", GPIOD_IN); + if (IS_ERR(init)) + return dev_err_probe(dev, PTR_ERR(init), + "Failed to get INIT GPIO\n"); + + done = devm_gpiod_get_optional(dev, "done", GPIOD_IN); + if (IS_ERR(done)) + return dev_err_probe(dev, PTR_ERR(done), + "Failed to get DONE GPIO\n"); + + priv->program = program; + priv->init = init; + priv->done = done; + + mgr = devm_fpga_mgr_register(dev, "Lattice sysCONFIG FPGA Manager", + &sysconfig_fpga_mgr_ops, priv); + + return PTR_ERR_OR_ZERO(mgr); +} +EXPORT_SYMBOL(sysconfig_probe); + +MODULE_DESCRIPTION("Lattice sysCONFIG FPGA Manager Core"); +MODULE_LICENSE("GPL"); diff --git a/drivers/fpga/lattice-sysconfig.h b/drivers/fpga/lattice-sysconfig.h new file mode 100644 index 000000000000..df47d9a524f6 --- /dev/null +++ b/drivers/fpga/lattice-sysconfig.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __LATTICE_SYSCONFIG_H +#define __LATTICE_SYSCONFIG_H + +#define SYSCONFIG_ISC_ENABLE {0xC6, 0x00, 0x00, 0x00} +#define SYSCONFIG_ISC_DISABLE {0x26, 0x00, 0x00, 0x00} +#define SYSCONFIG_ISC_ERASE {0x0E, 0x01, 0x00, 0x00} +#define SYSCONFIG_LSC_READ_STATUS {0x3C, 0x00, 0x00, 0x00} +#define SYSCONFIG_LSC_CHECK_BUSY {0xF0, 0x00, 0x00, 0x00} +#define SYSCONFIG_LSC_REFRESH {0x79, 0x00, 0x00, 0x00} +#define SYSCONFIG_LSC_INIT_ADDR {0x46, 0x00, 0x00, 0x00} +#define SYSCONFIG_LSC_BITSTREAM_BURST {0x7a, 0x00, 0x00, 0x00} + +#define SYSCONFIG_STATUS_DONE BIT(8) +#define SYSCONFIG_STATUS_BUSY BIT(12) +#define SYSCONFIG_STATUS_FAIL BIT(13) +#define SYSCONFIG_STATUS_ERR GENMASK(25, 23) + +#define 
SYSCONFIG_POLL_INTERVAL_US 30 +#define SYSCONFIG_POLL_BUSY_TIMEOUT_US 1000000 +#define SYSCONFIG_POLL_GPIO_TIMEOUT_US 100000 + +struct sysconfig_priv { + struct gpio_desc *program; + struct gpio_desc *init; + struct gpio_desc *done; + struct device *dev; + int (*command_transfer)(struct sysconfig_priv *priv, const void *tx_buf, + size_t tx_len, void *rx_buf, size_t rx_len); + int (*bitstream_burst_write_init)(struct sysconfig_priv *priv); + int (*bitstream_burst_write)(struct sysconfig_priv *priv, + const char *tx_buf, size_t tx_len); + int (*bitstream_burst_write_complete)(struct sysconfig_priv *priv); +}; + +int sysconfig_probe(struct sysconfig_priv *priv); + +#endif /* __LATTICE_SYSCONFIG_H */ diff --git a/drivers/fpga/machxo2-spi.c b/drivers/fpga/machxo2-spi.c index 1afb41aa20d7..905607992a12 100644 --- a/drivers/fpga/machxo2-spi.c +++ b/drivers/fpga/machxo2-spi.c @@ -225,8 +225,10 @@ static int machxo2_write_init(struct fpga_manager *mgr, goto fail; get_status(spi, &status); - if (test_bit(FAIL, &status)) + if (test_bit(FAIL, &status)) { + ret = -EINVAL; goto fail; + } dump_status_reg(&status); spi_message_init(&msg); @@ -313,6 +315,7 @@ static int machxo2_write_complete(struct fpga_manager *mgr, dump_status_reg(&status); if (!test_bit(DONE, &status)) { machxo2_cleanup(mgr); + ret = -EINVAL; goto fail; } @@ -335,6 +338,7 @@ static int machxo2_write_complete(struct fpga_manager *mgr, break; if (++refreshloop == MACHXO2_MAX_REFRESH_LOOP) { machxo2_cleanup(mgr); + ret = -EINVAL; goto fail; } } while (1); @@ -366,12 +370,9 @@ static int machxo2_spi_probe(struct spi_device *spi) return -EINVAL; } - mgr = devm_fpga_mgr_create(dev, "Lattice MachXO2 SPI FPGA Manager", - &machxo2_ops, spi); - if (!mgr) - return -ENOMEM; - - return devm_fpga_mgr_register(dev, mgr); + mgr = devm_fpga_mgr_register(dev, "Lattice MachXO2 SPI FPGA Manager", + &machxo2_ops, spi); + return PTR_ERR_OR_ZERO(mgr); } #ifdef CONFIG_OF diff --git a/drivers/fpga/microchip-spi.c b/drivers/fpga/microchip-spi.c new file mode 100644 index 000000000000..6134cea86ac8 --- /dev/null +++ b/drivers/fpga/microchip-spi.c @@ -0,0 +1,412 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Microchip Polarfire FPGA programming over slave SPI interface. + */ + +#include <linux/unaligned.h> +#include <linux/delay.h> +#include <linux/fpga/fpga-mgr.h> +#include <linux/iopoll.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/spi/spi.h> + +#define MPF_SPI_ISC_ENABLE 0x0B +#define MPF_SPI_ISC_DISABLE 0x0C +#define MPF_SPI_READ_STATUS 0x00 +#define MPF_SPI_READ_DATA 0x01 +#define MPF_SPI_FRAME_INIT 0xAE +#define MPF_SPI_FRAME 0xEE +#define MPF_SPI_PRG_MODE 0x01 +#define MPF_SPI_RELEASE 0x23 + +#define MPF_SPI_FRAME_SIZE 16 + +#define MPF_HEADER_SIZE_OFFSET 24 +#define MPF_DATA_SIZE_OFFSET 55 + +#define MPF_LOOKUP_TABLE_RECORD_SIZE 9 +#define MPF_LOOKUP_TABLE_BLOCK_ID_OFFSET 0 +#define MPF_LOOKUP_TABLE_BLOCK_START_OFFSET 1 + +#define MPF_COMPONENTS_SIZE_ID 5 +#define MPF_BITSTREAM_ID 8 + +#define MPF_BITS_PER_COMPONENT_SIZE 22 + +#define MPF_STATUS_POLL_TIMEOUT (2 * USEC_PER_SEC) +#define MPF_STATUS_BUSY BIT(0) +#define MPF_STATUS_READY BIT(1) +#define MPF_STATUS_SPI_VIOLATION BIT(2) +#define MPF_STATUS_SPI_ERROR BIT(3) + +struct mpf_priv { + struct spi_device *spi; + bool program_mode; + u8 tx __aligned(ARCH_KMALLOC_MINALIGN); + u8 rx; +}; + +static int mpf_read_status(struct mpf_priv *priv) +{ + /* + * HW status is returned on MISO in the first byte after CS went + * active. 
However, first reading can be inadequate, so we submit + * two identical SPI transfers and use result of the later one. + */ + struct spi_transfer xfers[2] = { + { + .tx_buf = &priv->tx, + .rx_buf = &priv->rx, + .len = 1, + .cs_change = 1, + }, { + .tx_buf = &priv->tx, + .rx_buf = &priv->rx, + .len = 1, + }, + }; + u8 status; + int ret; + + priv->tx = MPF_SPI_READ_STATUS; + + ret = spi_sync_transfer(priv->spi, xfers, 2); + if (ret) + return ret; + + status = priv->rx; + + if ((status & MPF_STATUS_SPI_VIOLATION) || + (status & MPF_STATUS_SPI_ERROR)) + return -EIO; + + return status; +} + +static enum fpga_mgr_states mpf_ops_state(struct fpga_manager *mgr) +{ + struct mpf_priv *priv = mgr->priv; + bool program_mode; + int status; + + program_mode = priv->program_mode; + status = mpf_read_status(priv); + + if (!program_mode && !status) + return FPGA_MGR_STATE_OPERATING; + + return FPGA_MGR_STATE_UNKNOWN; +} + +static int mpf_ops_parse_header(struct fpga_manager *mgr, + struct fpga_image_info *info, + const char *buf, size_t count) +{ + size_t component_size_byte_num, component_size_byte_off, + components_size_start, bitstream_start, + block_id_offset, block_start_offset; + u8 header_size, blocks_num, block_id; + u32 block_start, component_size; + u16 components_num, i; + + if (!buf) { + dev_err(&mgr->dev, "Image buffer is not provided\n"); + return -EINVAL; + } + + header_size = *(buf + MPF_HEADER_SIZE_OFFSET); + if (header_size > count) { + info->header_size = header_size; + return -EAGAIN; + } + + /* + * Go through look-up table to find out where actual bitstream starts + * and where sizes of components of the bitstream lies. + */ + blocks_num = *(buf + header_size - 1); + block_id_offset = header_size + MPF_LOOKUP_TABLE_BLOCK_ID_OFFSET; + block_start_offset = header_size + MPF_LOOKUP_TABLE_BLOCK_START_OFFSET; + + header_size += blocks_num * MPF_LOOKUP_TABLE_RECORD_SIZE; + if (header_size > count) { + info->header_size = header_size; + return -EAGAIN; + } + + components_size_start = 0; + bitstream_start = 0; + + while (blocks_num--) { + block_id = *(buf + block_id_offset); + block_start = get_unaligned_le32(buf + block_start_offset); + + switch (block_id) { + case MPF_BITSTREAM_ID: + bitstream_start = block_start; + info->header_size = block_start; + if (block_start > count) + return -EAGAIN; + + break; + case MPF_COMPONENTS_SIZE_ID: + components_size_start = block_start; + break; + default: + break; + } + + if (bitstream_start && components_size_start) + break; + + block_id_offset += MPF_LOOKUP_TABLE_RECORD_SIZE; + block_start_offset += MPF_LOOKUP_TABLE_RECORD_SIZE; + } + + if (!bitstream_start || !components_size_start) { + dev_err(&mgr->dev, "Failed to parse header look-up table\n"); + return -EFAULT; + } + + /* + * Parse bitstream size. + * Sizes of components of the bitstream are 22-bits long placed next + * to each other. Image header should be extended by now up to where + * actual bitstream starts, so no need for overflow check anymore. 
+ */ + components_num = get_unaligned_le16(buf + MPF_DATA_SIZE_OFFSET); + + for (i = 0; i < components_num; i++) { + component_size_byte_num = + (i * MPF_BITS_PER_COMPONENT_SIZE) / BITS_PER_BYTE; + component_size_byte_off = + (i * MPF_BITS_PER_COMPONENT_SIZE) % BITS_PER_BYTE; + + component_size = get_unaligned_le32(buf + + components_size_start + + component_size_byte_num); + component_size >>= component_size_byte_off; + component_size &= GENMASK(MPF_BITS_PER_COMPONENT_SIZE - 1, 0); + + info->data_size += component_size * MPF_SPI_FRAME_SIZE; + } + + return 0; +} + +static int mpf_poll_status(struct mpf_priv *priv, u8 mask) +{ + int ret, status; + + /* + * Busy poll HW status. Polling stops if any of the following + * conditions are met: + * - timeout is reached + * - mpf_read_status() returns an error + * - busy bit is cleared AND mask bits are set + */ + ret = read_poll_timeout(mpf_read_status, status, + (status < 0) || + ((status & (MPF_STATUS_BUSY | mask)) == mask), + 0, MPF_STATUS_POLL_TIMEOUT, false, priv); + if (ret < 0) + return ret; + + return status; +} + +static int mpf_spi_write(struct mpf_priv *priv, const void *buf, size_t buf_size) +{ + int status = mpf_poll_status(priv, 0); + + if (status < 0) + return status; + + return spi_write_then_read(priv->spi, buf, buf_size, NULL, 0); +} + +static int mpf_spi_write_then_read(struct mpf_priv *priv, + const void *txbuf, size_t txbuf_size, + void *rxbuf, size_t rxbuf_size) +{ + const u8 read_command[] = { MPF_SPI_READ_DATA }; + int ret; + + ret = mpf_spi_write(priv, txbuf, txbuf_size); + if (ret) + return ret; + + ret = mpf_poll_status(priv, MPF_STATUS_READY); + if (ret < 0) + return ret; + + return spi_write_then_read(priv->spi, read_command, sizeof(read_command), + rxbuf, rxbuf_size); +} + +static int mpf_ops_write_init(struct fpga_manager *mgr, + struct fpga_image_info *info, const char *buf, + size_t count) +{ + const u8 program_mode[] = { MPF_SPI_FRAME_INIT, MPF_SPI_PRG_MODE }; + const u8 isc_en_command[] = { MPF_SPI_ISC_ENABLE }; + struct mpf_priv *priv = mgr->priv; + struct device *dev = &mgr->dev; + u32 isc_ret = 0; + int ret; + + if (info->flags & FPGA_MGR_PARTIAL_RECONFIG) { + dev_err(dev, "Partial reconfiguration is not supported\n"); + return -EOPNOTSUPP; + } + + ret = mpf_spi_write_then_read(priv, isc_en_command, sizeof(isc_en_command), + &isc_ret, sizeof(isc_ret)); + if (ret || isc_ret) { + dev_err(dev, "Failed to enable ISC: spi_ret %d, isc_ret %u\n", + ret, isc_ret); + return -EFAULT; + } + + ret = mpf_spi_write(priv, program_mode, sizeof(program_mode)); + if (ret) { + dev_err(dev, "Failed to enter program mode: %d\n", ret); + return ret; + } + + priv->program_mode = true; + + return 0; +} + +static int mpf_spi_frame_write(struct mpf_priv *priv, const char *buf) +{ + struct spi_transfer xfers[2] = { + { + .tx_buf = &priv->tx, + .len = 1, + }, { + .tx_buf = buf, + .len = MPF_SPI_FRAME_SIZE, + }, + }; + int ret; + + ret = mpf_poll_status(priv, 0); + if (ret < 0) + return ret; + + priv->tx = MPF_SPI_FRAME; + + return spi_sync_transfer(priv->spi, xfers, ARRAY_SIZE(xfers)); +} + +static int mpf_ops_write(struct fpga_manager *mgr, const char *buf, size_t count) +{ + struct mpf_priv *priv = mgr->priv; + struct device *dev = &mgr->dev; + int ret, i; + + if (count % MPF_SPI_FRAME_SIZE) { + dev_err(dev, "Bitstream size is not a multiple of %d\n", + MPF_SPI_FRAME_SIZE); + return -EINVAL; + } + + for (i = 0; i < count / MPF_SPI_FRAME_SIZE; i++) { + ret = mpf_spi_frame_write(priv, buf + i * MPF_SPI_FRAME_SIZE); + if (ret) { + 
dev_err(dev, "Failed to write bitstream frame %d/%zu\n", + i, count / MPF_SPI_FRAME_SIZE); + return ret; + } + } + + return 0; +} + +static int mpf_ops_write_complete(struct fpga_manager *mgr, + struct fpga_image_info *info) +{ + const u8 isc_dis_command[] = { MPF_SPI_ISC_DISABLE }; + const u8 release_command[] = { MPF_SPI_RELEASE }; + struct mpf_priv *priv = mgr->priv; + struct device *dev = &mgr->dev; + int ret; + + ret = mpf_spi_write(priv, isc_dis_command, sizeof(isc_dis_command)); + if (ret) { + dev_err(dev, "Failed to disable ISC: %d\n", ret); + return ret; + } + + usleep_range(1000, 2000); + + ret = mpf_spi_write(priv, release_command, sizeof(release_command)); + if (ret) { + dev_err(dev, "Failed to exit program mode: %d\n", ret); + return ret; + } + + priv->program_mode = false; + + return 0; +} + +static const struct fpga_manager_ops mpf_ops = { + .state = mpf_ops_state, + .initial_header_size = 71, + .skip_header = true, + .parse_header = mpf_ops_parse_header, + .write_init = mpf_ops_write_init, + .write = mpf_ops_write, + .write_complete = mpf_ops_write_complete, +}; + +static int mpf_probe(struct spi_device *spi) +{ + struct device *dev = &spi->dev; + struct fpga_manager *mgr; + struct mpf_priv *priv; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->spi = spi; + + mgr = devm_fpga_mgr_register(dev, "Microchip Polarfire SPI FPGA Manager", + &mpf_ops, priv); + + return PTR_ERR_OR_ZERO(mgr); +} + +static const struct spi_device_id mpf_spi_ids[] = { + { .name = "mpf-spi-fpga-mgr", }, + {}, +}; +MODULE_DEVICE_TABLE(spi, mpf_spi_ids); + +#if IS_ENABLED(CONFIG_OF) +static const struct of_device_id mpf_of_ids[] = { + { .compatible = "microchip,mpf-spi-fpga-mgr" }, + {}, +}; +MODULE_DEVICE_TABLE(of, mpf_of_ids); +#endif /* IS_ENABLED(CONFIG_OF) */ + +static struct spi_driver mpf_driver = { + .probe = mpf_probe, + .id_table = mpf_spi_ids, + .driver = { + .name = "microchip_mpf_spi_fpga_mgr", + .of_match_table = of_match_ptr(mpf_of_ids), + }, +}; + +module_spi_driver(mpf_driver); + +MODULE_DESCRIPTION("Microchip Polarfire SPI FPGA Manager"); +MODULE_AUTHOR("Ivan Bornyakov <i.bornyakov@metrotek.ru>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/fpga/of-fpga-region.c b/drivers/fpga/of-fpga-region.c index e3c25576b6b9..43db4bb77138 100644 --- a/drivers/fpga/of-fpga-region.c +++ b/drivers/fpga/of-fpga-region.c @@ -12,7 +12,9 @@ #include <linux/kernel.h> #include <linux/list.h> #include <linux/module.h> +#include <linux/of.h> #include <linux/of_platform.h> +#include <linux/platform_device.h> #include <linux/slab.h> #include <linux/spinlock.h> @@ -28,7 +30,7 @@ MODULE_DEVICE_TABLE(of, fpga_region_of_match); * * Caller will need to put_device(®ion->dev) when done. * - * Returns FPGA Region struct or NULL + * Return: FPGA Region struct or NULL */ static struct fpga_region *of_fpga_region_find(struct device_node *np) { @@ -80,7 +82,7 @@ static struct fpga_manager *of_fpga_region_get_mgr(struct device_node *np) * Caller should call fpga_bridges_put(®ion->bridge_list) when * done with the bridges. * - * Return 0 for success (even if there are no bridges specified) + * Return: 0 for success (even if there are no bridges specified) * or -EBUSY if any of the bridges are in use. */ static int of_fpga_region_get_bridges(struct fpga_region *region) @@ -139,13 +141,13 @@ static int of_fpga_region_get_bridges(struct fpga_region *region) } /** - * child_regions_with_firmware + * child_regions_with_firmware - Used to check the child region info. 
* @overlay: device node of the overlay * * If the overlay adds child FPGA regions, they are not allowed to have * firmware-name property. * - * Return 0 for OK or -EINVAL if child FPGA region adds firmware-name. + * Return: 0 for OK or -EINVAL if child FPGA region adds firmware-name. */ static int child_regions_with_firmware(struct device_node *overlay) { @@ -184,14 +186,14 @@ static int child_regions_with_firmware(struct device_node *overlay) * Given an overlay applied to an FPGA region, parse the FPGA image specific * info in the overlay and do some checking. * - * Returns: + * Return: * NULL if overlay doesn't direct us to program the FPGA. * fpga_image_info struct if there is an image to program. * error code for invalid overlay. */ -static struct fpga_image_info *of_fpga_region_parse_ov( - struct fpga_region *region, - struct device_node *overlay) +static struct fpga_image_info * +of_fpga_region_parse_ov(struct fpga_region *region, + struct device_node *overlay) { struct device *dev = ®ion->dev; struct fpga_image_info *info; @@ -279,7 +281,7 @@ ret_no_info: * If the checks fail, overlay is rejected and does not get added to the * live tree. * - * Returns 0 for success or negative error code for failure. + * Return: 0 for success or negative error code for failure. */ static int of_fpga_region_notify_pre_apply(struct fpga_region *region, struct of_overlay_notify_data *nd) @@ -339,7 +341,7 @@ static void of_fpga_region_notify_post_remove(struct fpga_region *region, * This notifier handles programming an FPGA when a "firmware-name" property is * added to an fpga-region. * - * Returns NOTIFY_OK or error if FPGA programming fails. + * Return: NOTIFY_OK or error if FPGA programming fails. */ static int of_fpga_region_notify(struct notifier_block *nb, unsigned long action, void *arg) @@ -405,16 +407,12 @@ static int of_fpga_region_probe(struct platform_device *pdev) if (IS_ERR(mgr)) return -EPROBE_DEFER; - region = devm_fpga_region_create(dev, mgr, of_fpga_region_get_bridges); - if (!region) { - ret = -ENOMEM; + region = fpga_region_register(dev, mgr, of_fpga_region_get_bridges); + if (IS_ERR(region)) { + ret = PTR_ERR(region); goto eprobe_mgr_put; } - ret = fpga_region_register(region); - if (ret) - goto eprobe_mgr_put; - of_platform_populate(np, fpga_region_of_match, NULL, ®ion->dev); platform_set_drvdata(pdev, region); @@ -427,15 +425,13 @@ eprobe_mgr_put: return ret; } -static int of_fpga_region_remove(struct platform_device *pdev) +static void of_fpga_region_remove(struct platform_device *pdev) { struct fpga_region *region = platform_get_drvdata(pdev); struct fpga_manager *mgr = region->mgr; fpga_region_unregister(region); fpga_mgr_put(mgr); - - return 0; } static struct platform_driver of_fpga_region_driver = { @@ -448,8 +444,10 @@ static struct platform_driver of_fpga_region_driver = { }; /** - * fpga_region_init - init function for fpga_region class + * of_fpga_region_init - init function for fpga_region class * Creates the fpga_region class and registers a reconfig notifier. + * + * Return: 0 on success, negative error code otherwise. 
*/ static int __init of_fpga_region_init(void) { diff --git a/drivers/fpga/socfpga-a10.c b/drivers/fpga/socfpga-a10.c index 573d88bdf730..0165a3c86932 100644 --- a/drivers/fpga/socfpga-a10.c +++ b/drivers/fpga/socfpga-a10.c @@ -471,7 +471,6 @@ static int socfpga_a10_fpga_probe(struct platform_device *pdev) struct a10_fpga_priv *priv; void __iomem *reg_base; struct fpga_manager *mgr; - struct resource *res; int ret; priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); @@ -479,14 +478,12 @@ static int socfpga_a10_fpga_probe(struct platform_device *pdev) return -ENOMEM; /* First mmio base is for register access */ - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - reg_base = devm_ioremap_resource(dev, res); + reg_base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(reg_base)) return PTR_ERR(reg_base); /* Second mmio base is for writing FPGA image data */ - res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - priv->fpga_data_addr = devm_ioremap_resource(dev, res); + priv->fpga_data_addr = devm_platform_ioremap_resource(pdev, 1); if (IS_ERR(priv->fpga_data_addr)) return PTR_ERR(priv->fpga_data_addr); @@ -508,31 +505,25 @@ static int socfpga_a10_fpga_probe(struct platform_device *pdev) return -EBUSY; } - mgr = devm_fpga_mgr_create(dev, "SoCFPGA Arria10 FPGA Manager", - &socfpga_a10_fpga_mgr_ops, priv); - if (!mgr) - return -ENOMEM; - - platform_set_drvdata(pdev, mgr); - - ret = fpga_mgr_register(mgr); - if (ret) { + mgr = fpga_mgr_register(dev, "SoCFPGA Arria10 FPGA Manager", + &socfpga_a10_fpga_mgr_ops, priv); + if (IS_ERR(mgr)) { clk_disable_unprepare(priv->clk); - return ret; + return PTR_ERR(mgr); } + platform_set_drvdata(pdev, mgr); + return 0; } -static int socfpga_a10_fpga_remove(struct platform_device *pdev) +static void socfpga_a10_fpga_remove(struct platform_device *pdev) { struct fpga_manager *mgr = platform_get_drvdata(pdev); struct a10_fpga_priv *priv = mgr->priv; fpga_mgr_unregister(mgr); clk_disable_unprepare(priv->clk); - - return 0; } static const struct of_device_id socfpga_a10_fpga_of_match[] = { diff --git a/drivers/fpga/socfpga.c b/drivers/fpga/socfpga.c index 1f467173fc1f..b08b4bb8f650 100644 --- a/drivers/fpga/socfpga.c +++ b/drivers/fpga/socfpga.c @@ -301,16 +301,17 @@ static irqreturn_t socfpga_fpga_isr(int irq, void *dev_id) static int socfpga_fpga_wait_for_config_done(struct socfpga_fpga_priv *priv) { - int timeout, ret = 0; + int ret = 0; + long time_left; socfpga_fpga_disable_irqs(priv); init_completion(&priv->status_complete); socfpga_fpga_enable_irqs(priv, SOCFPGA_FPGMGR_MON_CONF_DONE); - timeout = wait_for_completion_interruptible_timeout( + time_left = wait_for_completion_interruptible_timeout( &priv->status_complete, msecs_to_jiffies(10)); - if (timeout == 0) + if (time_left == 0) ret = -ETIMEDOUT; socfpga_fpga_disable_irqs(priv); @@ -545,20 +546,17 @@ static int socfpga_fpga_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct socfpga_fpga_priv *priv; struct fpga_manager *mgr; - struct resource *res; int ret; priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - priv->fpga_base_addr = devm_ioremap_resource(dev, res); + priv->fpga_base_addr = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(priv->fpga_base_addr)) return PTR_ERR(priv->fpga_base_addr); - res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - priv->fpga_data_addr = devm_ioremap_resource(dev, res); + priv->fpga_data_addr = devm_platform_ioremap_resource(pdev, 1); if 
(IS_ERR(priv->fpga_data_addr)) return PTR_ERR(priv->fpga_data_addr); @@ -571,12 +569,9 @@ static int socfpga_fpga_probe(struct platform_device *pdev) if (ret) return ret; - mgr = devm_fpga_mgr_create(dev, "Altera SOCFPGA FPGA Manager", - &socfpga_fpga_ops, priv); - if (!mgr) - return -ENOMEM; - - return devm_fpga_mgr_register(dev, mgr); + mgr = devm_fpga_mgr_register(dev, "Altera SOCFPGA FPGA Manager", + &socfpga_fpga_ops, priv); + return PTR_ERR_OR_ZERO(mgr); } #ifdef CONFIG_OF diff --git a/drivers/fpga/stratix10-soc.c b/drivers/fpga/stratix10-soc.c index a2cea500f7cc..0a295ccf1644 100644 --- a/drivers/fpga/stratix10-soc.c +++ b/drivers/fpga/stratix10-soc.c @@ -10,6 +10,7 @@ #include <linux/module.h> #include <linux/of.h> #include <linux/of_platform.h> +#include <linux/platform_device.h> /* * FPGA programming requires a higher level of privilege (EL3), per the SoC @@ -213,9 +214,9 @@ static int s10_ops_write_init(struct fpga_manager *mgr, /* Allocate buffers from the service layer's pool. */ for (i = 0; i < NUM_SVC_BUFS; i++) { kbuf = stratix10_svc_allocate_memory(priv->chan, SVC_BUF_SIZE); - if (!kbuf) { + if (IS_ERR(kbuf)) { s10_free_buffers(mgr); - ret = -ENOMEM; + ret = PTR_ERR(kbuf); goto init_done; } @@ -388,13 +389,7 @@ static int s10_ops_write_complete(struct fpga_manager *mgr, return ret; } -static enum fpga_mgr_states s10_ops_state(struct fpga_manager *mgr) -{ - return FPGA_MGR_STATE_UNKNOWN; -} - static const struct fpga_manager_ops s10_ops = { - .state = s10_ops_state, .write_init = s10_ops_write_init, .write = s10_ops_write, .write_complete = s10_ops_write_complete, @@ -425,39 +420,29 @@ static int s10_probe(struct platform_device *pdev) init_completion(&priv->status_return_completion); - mgr = fpga_mgr_create(dev, "Stratix10 SOC FPGA Manager", - &s10_ops, priv); - if (!mgr) { - dev_err(dev, "unable to create FPGA manager\n"); - ret = -ENOMEM; - goto probe_err; - } - - ret = fpga_mgr_register(mgr); - if (ret) { + mgr = fpga_mgr_register(dev, "Stratix10 SOC FPGA Manager", + &s10_ops, priv); + if (IS_ERR(mgr)) { dev_err(dev, "unable to register FPGA manager\n"); - fpga_mgr_free(mgr); + ret = PTR_ERR(mgr); goto probe_err; } platform_set_drvdata(pdev, mgr); - return ret; + return 0; probe_err: stratix10_svc_free_channel(priv->chan); return ret; } -static int s10_remove(struct platform_device *pdev) +static void s10_remove(struct platform_device *pdev) { struct fpga_manager *mgr = platform_get_drvdata(pdev); struct s10_priv *priv = mgr->priv; fpga_mgr_unregister(mgr); - fpga_mgr_free(mgr); stratix10_svc_free_channel(priv->chan); - - return 0; } static const struct of_device_id s10_of_match[] = { diff --git a/drivers/fpga/tests/.kunitconfig b/drivers/fpga/tests/.kunitconfig new file mode 100644 index 000000000000..a1c2a2974c39 --- /dev/null +++ b/drivers/fpga/tests/.kunitconfig @@ -0,0 +1,5 @@ +CONFIG_KUNIT=y +CONFIG_FPGA=y +CONFIG_FPGA_REGION=y +CONFIG_FPGA_BRIDGE=y +CONFIG_FPGA_KUNIT_TESTS=y diff --git a/drivers/fpga/tests/Kconfig b/drivers/fpga/tests/Kconfig new file mode 100644 index 000000000000..e4a64815f16d --- /dev/null +++ b/drivers/fpga/tests/Kconfig @@ -0,0 +1,11 @@ +config FPGA_KUNIT_TESTS + tristate "KUnit test for the FPGA subsystem" if !KUNIT_ALL_TESTS + depends on FPGA && FPGA_REGION && FPGA_BRIDGE && KUNIT=y + default KUNIT_ALL_TESTS + help + This builds unit tests for the FPGA subsystem + + For more information on KUnit and unit tests in general, + please refer to the KUnit documentation in Documentation/dev-tools/kunit/. + + If unsure, say N. 
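With the .kunitconfig added above, these suites can be run through the in-tree KUnit wrapper; a minimal invocation (assuming the standard tools/testing/kunit setup and the default UML build) looks like:

    ./tools/testing/kunit/kunit.py run --kunitconfig=drivers/fpga/tests/.kunitconfig

which enables the options listed in the Kconfig entry above and executes the fpga_mgr, fpga_bridge and fpga_region suites added below.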
diff --git a/drivers/fpga/tests/Makefile b/drivers/fpga/tests/Makefile new file mode 100644 index 000000000000..bb78215c645c --- /dev/null +++ b/drivers/fpga/tests/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for KUnit test suites for the FPGA subsystem +# + +obj-$(CONFIG_FPGA_KUNIT_TESTS) += fpga-mgr-test.o fpga-bridge-test.o fpga-region-test.o diff --git a/drivers/fpga/tests/fpga-bridge-test.c b/drivers/fpga/tests/fpga-bridge-test.c new file mode 100644 index 000000000000..124ba40e32b1 --- /dev/null +++ b/drivers/fpga/tests/fpga-bridge-test.c @@ -0,0 +1,174 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * KUnit test for the FPGA Bridge + * + * Copyright (C) 2023 Red Hat, Inc. + * + * Author: Marco Pagani <marpagan@redhat.com> + */ + +#include <kunit/device.h> +#include <kunit/test.h> +#include <linux/fpga/fpga-bridge.h> +#include <linux/module.h> +#include <linux/types.h> + +struct bridge_stats { + bool enable; +}; + +struct bridge_ctx { + struct fpga_bridge *bridge; + struct device *dev; + struct bridge_stats stats; +}; + +/* + * Wrapper to avoid a cast warning when passing the action function directly + * to kunit_add_action(). + */ +KUNIT_DEFINE_ACTION_WRAPPER(fpga_bridge_unregister_wrapper, fpga_bridge_unregister, + struct fpga_bridge *); + +static int op_enable_set(struct fpga_bridge *bridge, bool enable) +{ + struct bridge_stats *stats = bridge->priv; + + stats->enable = enable; + + return 0; +} + +/* + * Fake FPGA bridge that implements only the enable_set op to track + * the state. + */ +static const struct fpga_bridge_ops fake_bridge_ops = { + .enable_set = op_enable_set, +}; + +/** + * register_test_bridge() - Register a fake FPGA bridge for testing. + * @test: KUnit test context object. + * @dev_name: name of the kunit device to be registered + * + * Return: Context of the newly registered FPGA bridge. 
+ */ +static struct bridge_ctx *register_test_bridge(struct kunit *test, const char *dev_name) +{ + struct bridge_ctx *ctx; + int ret; + + ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); + + ctx->dev = kunit_device_register(test, dev_name); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx->dev); + + ctx->bridge = fpga_bridge_register(ctx->dev, "Fake FPGA bridge", &fake_bridge_ops, + &ctx->stats); + KUNIT_ASSERT_FALSE(test, IS_ERR_OR_NULL(ctx->bridge)); + + ret = kunit_add_action_or_reset(test, fpga_bridge_unregister_wrapper, ctx->bridge); + KUNIT_ASSERT_EQ(test, ret, 0); + + return ctx; +} + +static void fpga_bridge_test_get(struct kunit *test) +{ + struct bridge_ctx *ctx = test->priv; + struct fpga_bridge *bridge; + + bridge = fpga_bridge_get(ctx->dev, NULL); + KUNIT_EXPECT_PTR_EQ(test, bridge, ctx->bridge); + + bridge = fpga_bridge_get(ctx->dev, NULL); + KUNIT_EXPECT_EQ(test, PTR_ERR(bridge), -EBUSY); + + fpga_bridge_put(ctx->bridge); +} + +static void fpga_bridge_test_toggle(struct kunit *test) +{ + struct bridge_ctx *ctx = test->priv; + int ret; + + ret = fpga_bridge_disable(ctx->bridge); + KUNIT_EXPECT_EQ(test, ret, 0); + KUNIT_EXPECT_FALSE(test, ctx->stats.enable); + + ret = fpga_bridge_enable(ctx->bridge); + KUNIT_EXPECT_EQ(test, ret, 0); + KUNIT_EXPECT_TRUE(test, ctx->stats.enable); +} + +/* Test the functions for getting and controlling a list of bridges */ +static void fpga_bridge_test_get_put_list(struct kunit *test) +{ + struct list_head bridge_list; + struct bridge_ctx *ctx_0, *ctx_1; + int ret; + + ctx_0 = test->priv; + ctx_1 = register_test_bridge(test, "fpga-bridge-test-dev-1"); + + INIT_LIST_HEAD(&bridge_list); + + /* Get bridge 0 and add it to the list */ + ret = fpga_bridge_get_to_list(ctx_0->dev, NULL, &bridge_list); + KUNIT_EXPECT_EQ(test, ret, 0); + + KUNIT_EXPECT_PTR_EQ(test, ctx_0->bridge, + list_first_entry_or_null(&bridge_list, struct fpga_bridge, node)); + + /* Get bridge 1 and add it to the list */ + ret = fpga_bridge_get_to_list(ctx_1->dev, NULL, &bridge_list); + KUNIT_EXPECT_EQ(test, ret, 0); + + KUNIT_EXPECT_PTR_EQ(test, ctx_1->bridge, + list_first_entry_or_null(&bridge_list, struct fpga_bridge, node)); + + /* Disable an then enable both bridges from the list */ + ret = fpga_bridges_disable(&bridge_list); + KUNIT_EXPECT_EQ(test, ret, 0); + + KUNIT_EXPECT_FALSE(test, ctx_0->stats.enable); + KUNIT_EXPECT_FALSE(test, ctx_1->stats.enable); + + ret = fpga_bridges_enable(&bridge_list); + KUNIT_EXPECT_EQ(test, ret, 0); + + KUNIT_EXPECT_TRUE(test, ctx_0->stats.enable); + KUNIT_EXPECT_TRUE(test, ctx_1->stats.enable); + + /* Put and remove both bridges from the list */ + fpga_bridges_put(&bridge_list); + + KUNIT_EXPECT_TRUE(test, list_empty(&bridge_list)); +} + +static int fpga_bridge_test_init(struct kunit *test) +{ + test->priv = register_test_bridge(test, "fpga-bridge-test-dev-0"); + + return 0; +} + +static struct kunit_case fpga_bridge_test_cases[] = { + KUNIT_CASE(fpga_bridge_test_get), + KUNIT_CASE(fpga_bridge_test_toggle), + KUNIT_CASE(fpga_bridge_test_get_put_list), + {} +}; + +static struct kunit_suite fpga_bridge_suite = { + .name = "fpga_bridge", + .init = fpga_bridge_test_init, + .test_cases = fpga_bridge_test_cases, +}; + +kunit_test_suite(fpga_bridge_suite); + +MODULE_DESCRIPTION("KUnit test for the FPGA Bridge"); +MODULE_LICENSE("GPL"); diff --git a/drivers/fpga/tests/fpga-mgr-test.c b/drivers/fpga/tests/fpga-mgr-test.c new file mode 100644 index 000000000000..62975a39ee14 --- /dev/null +++ 
b/drivers/fpga/tests/fpga-mgr-test.c @@ -0,0 +1,335 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * KUnit test for the FPGA Manager + * + * Copyright (C) 2023 Red Hat, Inc. + * + * Author: Marco Pagani <marpagan@redhat.com> + */ + +#include <kunit/device.h> +#include <kunit/test.h> +#include <linux/fpga/fpga-mgr.h> +#include <linux/module.h> +#include <linux/scatterlist.h> +#include <linux/types.h> + +#define HEADER_FILL 'H' +#define IMAGE_FILL 'P' +#define IMAGE_BLOCK 1024 + +#define HEADER_SIZE IMAGE_BLOCK +#define IMAGE_SIZE (IMAGE_BLOCK * 4) + +struct mgr_stats { + bool header_match; + bool image_match; + u32 seq_num; + u32 op_parse_header_seq; + u32 op_write_init_seq; + u32 op_write_seq; + u32 op_write_sg_seq; + u32 op_write_complete_seq; + enum fpga_mgr_states op_parse_header_state; + enum fpga_mgr_states op_write_init_state; + enum fpga_mgr_states op_write_state; + enum fpga_mgr_states op_write_sg_state; + enum fpga_mgr_states op_write_complete_state; +}; + +struct mgr_ctx { + struct fpga_image_info *img_info; + struct fpga_manager *mgr; + struct device *dev; + struct mgr_stats stats; +}; + +/* + * Wrappers to avoid cast warnings when passing action functions directly + * to kunit_add_action(). + */ +KUNIT_DEFINE_ACTION_WRAPPER(sg_free_table_wrapper, sg_free_table, + struct sg_table *); + +KUNIT_DEFINE_ACTION_WRAPPER(fpga_image_info_free_wrapper, fpga_image_info_free, + struct fpga_image_info *); + +/** + * init_test_buffer() - Allocate and initialize a test image in a buffer. + * @test: KUnit test context object. + * @count: image size in bytes. + * + * Return: pointer to the newly allocated image. + */ +static char *init_test_buffer(struct kunit *test, size_t count) +{ + char *buf; + + KUNIT_ASSERT_GE(test, count, HEADER_SIZE); + + buf = kunit_kzalloc(test, count, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf); + + memset(buf, HEADER_FILL, HEADER_SIZE); + memset(buf + HEADER_SIZE, IMAGE_FILL, count - HEADER_SIZE); + + return buf; +} + +/* + * Check the image header. Do not return an error code if the image check fails + * since, in this case, it is a failure of the FPGA manager itself, not this + * op that tests it. + */ +static int op_parse_header(struct fpga_manager *mgr, struct fpga_image_info *info, + const char *buf, size_t count) +{ + struct mgr_stats *stats = mgr->priv; + size_t i; + + stats->op_parse_header_state = mgr->state; + stats->op_parse_header_seq = stats->seq_num++; + + /* Set header_size and data_size for later */ + info->header_size = HEADER_SIZE; + info->data_size = info->count - HEADER_SIZE; + + stats->header_match = true; + for (i = 0; i < info->header_size; i++) { + if (buf[i] != HEADER_FILL) { + stats->header_match = false; + break; + } + } + + return 0; +} + +static int op_write_init(struct fpga_manager *mgr, struct fpga_image_info *info, + const char *buf, size_t count) +{ + struct mgr_stats *stats = mgr->priv; + + stats->op_write_init_state = mgr->state; + stats->op_write_init_seq = stats->seq_num++; + + return 0; +} + +/* + * Check the image data. As with op_parse_header, do not return an error code + * if the image check fails. 
+ */ +static int op_write(struct fpga_manager *mgr, const char *buf, size_t count) +{ + struct mgr_stats *stats = mgr->priv; + size_t i; + + stats->op_write_state = mgr->state; + stats->op_write_seq = stats->seq_num++; + + stats->image_match = true; + for (i = 0; i < count; i++) { + if (buf[i] != IMAGE_FILL) { + stats->image_match = false; + break; + } + } + + return 0; +} + +/* + * Check the image data, but first skip the header since write_sg will get + * the whole image in sg_table. As with op_parse_header, do not return an + * error code if the image check fails. + */ +static int op_write_sg(struct fpga_manager *mgr, struct sg_table *sgt) +{ + struct mgr_stats *stats = mgr->priv; + struct sg_mapping_iter miter; + char *img; + size_t i; + + stats->op_write_sg_state = mgr->state; + stats->op_write_sg_seq = stats->seq_num++; + + stats->image_match = true; + sg_miter_start(&miter, sgt->sgl, sgt->nents, SG_MITER_FROM_SG); + + if (!sg_miter_skip(&miter, HEADER_SIZE)) { + stats->image_match = false; + goto out; + } + + while (sg_miter_next(&miter)) { + img = miter.addr; + for (i = 0; i < miter.length; i++) { + if (img[i] != IMAGE_FILL) { + stats->image_match = false; + goto out; + } + } + } +out: + sg_miter_stop(&miter); + return 0; +} + +static int op_write_complete(struct fpga_manager *mgr, struct fpga_image_info *info) +{ + struct mgr_stats *stats = mgr->priv; + + stats->op_write_complete_state = mgr->state; + stats->op_write_complete_seq = stats->seq_num++; + + return 0; +} + +/* + * Fake FPGA manager that implements all ops required to check the programming + * sequence using a single contiguous buffer and a scatter gather table. + */ +static const struct fpga_manager_ops fake_mgr_ops = { + .skip_header = true, + .parse_header = op_parse_header, + .write_init = op_write_init, + .write = op_write, + .write_sg = op_write_sg, + .write_complete = op_write_complete, +}; + +static void fpga_mgr_test_get(struct kunit *test) +{ + struct mgr_ctx *ctx = test->priv; + struct fpga_manager *mgr; + + mgr = fpga_mgr_get(ctx->dev); + KUNIT_EXPECT_PTR_EQ(test, mgr, ctx->mgr); + + fpga_mgr_put(ctx->mgr); +} + +static void fpga_mgr_test_lock(struct kunit *test) +{ + struct mgr_ctx *ctx = test->priv; + int ret; + + ret = fpga_mgr_lock(ctx->mgr); + KUNIT_EXPECT_EQ(test, ret, 0); + + ret = fpga_mgr_lock(ctx->mgr); + KUNIT_EXPECT_EQ(test, ret, -EBUSY); + + fpga_mgr_unlock(ctx->mgr); +} + +/* Check the programming sequence using an image in a buffer */ +static void fpga_mgr_test_img_load_buf(struct kunit *test) +{ + struct mgr_ctx *ctx = test->priv; + char *img_buf; + int ret; + + img_buf = init_test_buffer(test, IMAGE_SIZE); + + ctx->img_info->count = IMAGE_SIZE; + ctx->img_info->buf = img_buf; + + ret = fpga_mgr_load(ctx->mgr, ctx->img_info); + KUNIT_EXPECT_EQ(test, ret, 0); + + KUNIT_EXPECT_TRUE(test, ctx->stats.header_match); + KUNIT_EXPECT_TRUE(test, ctx->stats.image_match); + + KUNIT_EXPECT_EQ(test, ctx->stats.op_parse_header_state, FPGA_MGR_STATE_PARSE_HEADER); + KUNIT_EXPECT_EQ(test, ctx->stats.op_write_init_state, FPGA_MGR_STATE_WRITE_INIT); + KUNIT_EXPECT_EQ(test, ctx->stats.op_write_state, FPGA_MGR_STATE_WRITE); + KUNIT_EXPECT_EQ(test, ctx->stats.op_write_complete_state, FPGA_MGR_STATE_WRITE_COMPLETE); + + KUNIT_EXPECT_EQ(test, ctx->stats.op_write_init_seq, ctx->stats.op_parse_header_seq + 1); + KUNIT_EXPECT_EQ(test, ctx->stats.op_write_seq, ctx->stats.op_parse_header_seq + 2); + KUNIT_EXPECT_EQ(test, ctx->stats.op_write_complete_seq, ctx->stats.op_parse_header_seq + 3); +} + +/* Check the 
programming sequence using an image in a scatter gather table */ +static void fpga_mgr_test_img_load_sgt(struct kunit *test) +{ + struct mgr_ctx *ctx = test->priv; + struct sg_table *sgt; + char *img_buf; + int ret; + + img_buf = init_test_buffer(test, IMAGE_SIZE); + + sgt = kunit_kzalloc(test, sizeof(*sgt), GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sgt); + ret = sg_alloc_table(sgt, 1, GFP_KERNEL); + KUNIT_ASSERT_EQ(test, ret, 0); + sg_init_one(sgt->sgl, img_buf, IMAGE_SIZE); + + ret = kunit_add_action_or_reset(test, sg_free_table_wrapper, sgt); + KUNIT_ASSERT_EQ(test, ret, 0); + + ctx->img_info->sgt = sgt; + + ret = fpga_mgr_load(ctx->mgr, ctx->img_info); + KUNIT_EXPECT_EQ(test, ret, 0); + + KUNIT_EXPECT_TRUE(test, ctx->stats.header_match); + KUNIT_EXPECT_TRUE(test, ctx->stats.image_match); + + KUNIT_EXPECT_EQ(test, ctx->stats.op_parse_header_state, FPGA_MGR_STATE_PARSE_HEADER); + KUNIT_EXPECT_EQ(test, ctx->stats.op_write_init_state, FPGA_MGR_STATE_WRITE_INIT); + KUNIT_EXPECT_EQ(test, ctx->stats.op_write_sg_state, FPGA_MGR_STATE_WRITE); + KUNIT_EXPECT_EQ(test, ctx->stats.op_write_complete_state, FPGA_MGR_STATE_WRITE_COMPLETE); + + KUNIT_EXPECT_EQ(test, ctx->stats.op_write_init_seq, ctx->stats.op_parse_header_seq + 1); + KUNIT_EXPECT_EQ(test, ctx->stats.op_write_sg_seq, ctx->stats.op_parse_header_seq + 2); + KUNIT_EXPECT_EQ(test, ctx->stats.op_write_complete_seq, ctx->stats.op_parse_header_seq + 3); +} + +static int fpga_mgr_test_init(struct kunit *test) +{ + struct mgr_ctx *ctx; + int ret; + + ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); + + ctx->dev = kunit_device_register(test, "fpga-manager-test-dev"); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx->dev); + + ctx->mgr = devm_fpga_mgr_register(ctx->dev, "Fake FPGA Manager", &fake_mgr_ops, + &ctx->stats); + KUNIT_ASSERT_FALSE(test, IS_ERR_OR_NULL(ctx->mgr)); + + ctx->img_info = fpga_image_info_alloc(ctx->dev); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx->img_info); + + ret = kunit_add_action_or_reset(test, fpga_image_info_free_wrapper, ctx->img_info); + KUNIT_ASSERT_EQ(test, ret, 0); + + test->priv = ctx; + + return 0; +} + +static struct kunit_case fpga_mgr_test_cases[] = { + KUNIT_CASE(fpga_mgr_test_get), + KUNIT_CASE(fpga_mgr_test_lock), + KUNIT_CASE(fpga_mgr_test_img_load_buf), + KUNIT_CASE(fpga_mgr_test_img_load_sgt), + {} +}; + +static struct kunit_suite fpga_mgr_suite = { + .name = "fpga_mgr", + .init = fpga_mgr_test_init, + .test_cases = fpga_mgr_test_cases, +}; + +kunit_test_suite(fpga_mgr_suite); + +MODULE_DESCRIPTION("KUnit test for the FPGA Manager"); +MODULE_LICENSE("GPL"); diff --git a/drivers/fpga/tests/fpga-region-test.c b/drivers/fpga/tests/fpga-region-test.c new file mode 100644 index 000000000000..020ceac48509 --- /dev/null +++ b/drivers/fpga/tests/fpga-region-test.c @@ -0,0 +1,218 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * KUnit test for the FPGA Region + * + * Copyright (C) 2023 Red Hat, Inc. 
+ * + * Author: Marco Pagani <marpagan@redhat.com> + */ + +#include <kunit/device.h> +#include <kunit/test.h> +#include <linux/fpga/fpga-bridge.h> +#include <linux/fpga/fpga-mgr.h> +#include <linux/fpga/fpga-region.h> +#include <linux/module.h> +#include <linux/types.h> + +struct mgr_stats { + u32 write_count; +}; + +struct bridge_stats { + bool enable; + u32 cycles_count; +}; + +struct test_ctx { + struct fpga_manager *mgr; + struct device *mgr_dev; + struct fpga_bridge *bridge; + struct device *bridge_dev; + struct fpga_region *region; + struct device *region_dev; + struct bridge_stats bridge_stats; + struct mgr_stats mgr_stats; +}; + +/* + * Wrappers to avoid cast warnings when passing action functions directly + * to kunit_add_action(). + */ +KUNIT_DEFINE_ACTION_WRAPPER(fpga_image_info_free_wrapper, fpga_image_info_free, + struct fpga_image_info *); + +KUNIT_DEFINE_ACTION_WRAPPER(fpga_bridge_unregister_wrapper, fpga_bridge_unregister, + struct fpga_bridge *); + +KUNIT_DEFINE_ACTION_WRAPPER(fpga_region_unregister_wrapper, fpga_region_unregister, + struct fpga_region *); + +static int op_write(struct fpga_manager *mgr, const char *buf, size_t count) +{ + struct mgr_stats *stats = mgr->priv; + + stats->write_count++; + + return 0; +} + +/* + * Fake FPGA manager that implements only the write op to count the number + * of programming cycles. The internals of the programming sequence are + * tested in the Manager suite since they are outside the responsibility + * of the Region. + */ +static const struct fpga_manager_ops fake_mgr_ops = { + .write = op_write, +}; + +static int op_enable_set(struct fpga_bridge *bridge, bool enable) +{ + struct bridge_stats *stats = bridge->priv; + + if (!stats->enable && enable) + stats->cycles_count++; + + stats->enable = enable; + + return 0; +} + +/* + * Fake FPGA bridge that implements only enable_set op to count the number + * of activation cycles. + */ +static const struct fpga_bridge_ops fake_bridge_ops = { + .enable_set = op_enable_set, +}; + +static int fake_region_get_bridges(struct fpga_region *region) +{ + struct fpga_bridge *bridge = region->priv; + + return fpga_bridge_get_to_list(bridge->dev.parent, region->info, ®ion->bridge_list); +} + +static int fake_region_match(struct device *dev, const void *data) +{ + return dev->parent == data; +} + +static void fpga_region_test_class_find(struct kunit *test) +{ + struct test_ctx *ctx = test->priv; + struct fpga_region *region; + + region = fpga_region_class_find(NULL, ctx->region_dev, fake_region_match); + KUNIT_EXPECT_PTR_EQ(test, region, ctx->region); + + put_device(®ion->dev); +} + +/* + * FPGA Region programming test. The Region must call get_bridges() to get + * and control the bridges, and then the Manager for the actual programming. 
+ */ +static void fpga_region_test_program_fpga(struct kunit *test) +{ + struct test_ctx *ctx = test->priv; + struct fpga_image_info *img_info; + char img_buf[4]; + int ret; + + img_info = fpga_image_info_alloc(ctx->mgr_dev); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, img_info); + + ret = kunit_add_action_or_reset(test, fpga_image_info_free_wrapper, img_info); + KUNIT_ASSERT_EQ(test, ret, 0); + + img_info->buf = img_buf; + img_info->count = sizeof(img_buf); + + ctx->region->info = img_info; + ret = fpga_region_program_fpga(ctx->region); + KUNIT_ASSERT_EQ(test, ret, 0); + + KUNIT_EXPECT_EQ(test, 1, ctx->mgr_stats.write_count); + KUNIT_EXPECT_EQ(test, 1, ctx->bridge_stats.cycles_count); + + fpga_bridges_put(&ctx->region->bridge_list); + + ret = fpga_region_program_fpga(ctx->region); + KUNIT_ASSERT_EQ(test, ret, 0); + + KUNIT_EXPECT_EQ(test, 2, ctx->mgr_stats.write_count); + KUNIT_EXPECT_EQ(test, 2, ctx->bridge_stats.cycles_count); + + fpga_bridges_put(&ctx->region->bridge_list); +} + +/* + * The configuration used in this test suite uses a single bridge to + * limit the code under test to a single unit. The functions used by the + * Region for getting and controlling bridges are tested (with a list of + * multiple bridges) in the Bridge suite. + */ +static int fpga_region_test_init(struct kunit *test) +{ + struct test_ctx *ctx; + struct fpga_region_info region_info = { 0 }; + int ret; + + ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); + + ctx->mgr_dev = kunit_device_register(test, "fpga-manager-test-dev"); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx->mgr_dev); + + ctx->mgr = devm_fpga_mgr_register(ctx->mgr_dev, "Fake FPGA Manager", + &fake_mgr_ops, &ctx->mgr_stats); + KUNIT_ASSERT_FALSE(test, IS_ERR_OR_NULL(ctx->mgr)); + + ctx->bridge_dev = kunit_device_register(test, "fpga-bridge-test-dev"); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx->bridge_dev); + + ctx->bridge = fpga_bridge_register(ctx->bridge_dev, "Fake FPGA Bridge", + &fake_bridge_ops, &ctx->bridge_stats); + KUNIT_ASSERT_FALSE(test, IS_ERR_OR_NULL(ctx->bridge)); + + ctx->bridge_stats.enable = true; + + ret = kunit_add_action_or_reset(test, fpga_bridge_unregister_wrapper, ctx->bridge); + KUNIT_ASSERT_EQ(test, ret, 0); + + ctx->region_dev = kunit_device_register(test, "fpga-region-test-dev"); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx->region_dev); + + region_info.mgr = ctx->mgr; + region_info.priv = ctx->bridge; + region_info.get_bridges = fake_region_get_bridges; + + ctx->region = fpga_region_register_full(ctx->region_dev, ®ion_info); + KUNIT_ASSERT_FALSE(test, IS_ERR_OR_NULL(ctx->region)); + + ret = kunit_add_action_or_reset(test, fpga_region_unregister_wrapper, ctx->region); + KUNIT_ASSERT_EQ(test, ret, 0); + + test->priv = ctx; + + return 0; +} + +static struct kunit_case fpga_region_test_cases[] = { + KUNIT_CASE(fpga_region_test_class_find), + KUNIT_CASE(fpga_region_test_program_fpga), + {} +}; + +static struct kunit_suite fpga_region_suite = { + .name = "fpga_region", + .init = fpga_region_test_init, + .test_cases = fpga_region_test_cases, +}; + +kunit_test_suite(fpga_region_suite); + +MODULE_DESCRIPTION("KUnit test for the FPGA Region"); +MODULE_LICENSE("GPL"); diff --git a/drivers/fpga/ts73xx-fpga.c b/drivers/fpga/ts73xx-fpga.c index 101f016c6ed8..4e1d2a4d3df4 100644 --- a/drivers/fpga/ts73xx-fpga.c +++ b/drivers/fpga/ts73xx-fpga.c @@ -32,11 +32,6 @@ struct ts73xx_fpga_priv { struct device *dev; }; -static enum fpga_mgr_states ts73xx_fpga_state(struct fpga_manager *mgr) -{ - return 
FPGA_MGR_STATE_UNKNOWN; -} - static int ts73xx_fpga_write_init(struct fpga_manager *mgr, struct fpga_image_info *info, const char *buf, size_t count) @@ -98,7 +93,6 @@ static int ts73xx_fpga_write_complete(struct fpga_manager *mgr, } static const struct fpga_manager_ops ts73xx_fpga_ops = { - .state = ts73xx_fpga_state, .write_init = ts73xx_fpga_write_init, .write = ts73xx_fpga_write, .write_complete = ts73xx_fpga_write_complete, @@ -109,7 +103,6 @@ static int ts73xx_fpga_probe(struct platform_device *pdev) struct device *kdev = &pdev->dev; struct ts73xx_fpga_priv *priv; struct fpga_manager *mgr; - struct resource *res; priv = devm_kzalloc(kdev, sizeof(*priv), GFP_KERNEL); if (!priv) @@ -117,17 +110,13 @@ static int ts73xx_fpga_probe(struct platform_device *pdev) priv->dev = kdev; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - priv->io_base = devm_ioremap_resource(kdev, res); + priv->io_base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(priv->io_base)) return PTR_ERR(priv->io_base); - mgr = devm_fpga_mgr_create(kdev, "TS-73xx FPGA Manager", - &ts73xx_fpga_ops, priv); - if (!mgr) - return -ENOMEM; - - return devm_fpga_mgr_register(kdev, mgr); + mgr = devm_fpga_mgr_register(kdev, "TS-73xx FPGA Manager", + &ts73xx_fpga_ops, priv); + return PTR_ERR_OR_ZERO(mgr); } static struct platform_driver ts73xx_fpga_driver = { diff --git a/drivers/fpga/versal-fpga.c b/drivers/fpga/versal-fpga.c new file mode 100644 index 000000000000..e6189106c468 --- /dev/null +++ b/drivers/fpga/versal-fpga.c @@ -0,0 +1,80 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2019-2021 Xilinx, Inc. + */ + +#include <linux/dma-mapping.h> +#include <linux/fpga/fpga-mgr.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of_address.h> +#include <linux/string.h> +#include <linux/firmware/xlnx-zynqmp.h> + +static int versal_fpga_ops_write_init(struct fpga_manager *mgr, + struct fpga_image_info *info, + const char *buf, size_t size) +{ + return 0; +} + +static int versal_fpga_ops_write(struct fpga_manager *mgr, + const char *buf, size_t size) +{ + dma_addr_t dma_addr = 0; + char *kbuf; + int ret; + + kbuf = dma_alloc_coherent(mgr->dev.parent, size, &dma_addr, GFP_KERNEL); + if (!kbuf) + return -ENOMEM; + + memcpy(kbuf, buf, size); + ret = zynqmp_pm_load_pdi(PDI_SRC_DDR, dma_addr); + dma_free_coherent(mgr->dev.parent, size, kbuf, dma_addr); + + return ret; +} + +static const struct fpga_manager_ops versal_fpga_ops = { + .write_init = versal_fpga_ops_write_init, + .write = versal_fpga_ops_write, +}; + +static int versal_fpga_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct fpga_manager *mgr; + int ret; + + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44)); + if (ret < 0) { + dev_err(dev, "no usable DMA configuration\n"); + return ret; + } + + mgr = devm_fpga_mgr_register(dev, "Xilinx Versal FPGA Manager", + &versal_fpga_ops, NULL); + return PTR_ERR_OR_ZERO(mgr); +} + +static const struct of_device_id versal_fpga_of_match[] = { + { .compatible = "xlnx,versal-fpga", }, + {}, +}; +MODULE_DEVICE_TABLE(of, versal_fpga_of_match); + +static struct platform_driver versal_fpga_driver = { + .probe = versal_fpga_probe, + .driver = { + .name = "versal_fpga_manager", + .of_match_table = versal_fpga_of_match, + }, +}; +module_platform_driver(versal_fpga_driver); + +MODULE_AUTHOR("Nava kishore Manne <nava.manne@xilinx.com>"); +MODULE_AUTHOR("Appana Durga Kedareswara rao <appanad.durga.rao@xilinx.com>"); 
+MODULE_DESCRIPTION("Xilinx Versal FPGA Manager"); +MODULE_LICENSE("GPL"); diff --git a/drivers/fpga/xilinx-core.c b/drivers/fpga/xilinx-core.c new file mode 100644 index 000000000000..39aeacf2e4f1 --- /dev/null +++ b/drivers/fpga/xilinx-core.c @@ -0,0 +1,229 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Common parts of the Xilinx Spartan6 and 7 Series FPGA manager drivers. + * + * Copyright (C) 2017 DENX Software Engineering + * + * Anatolij Gustschin <agust@denx.de> + */ + +#include "xilinx-core.h" + +#include <linux/delay.h> +#include <linux/fpga/fpga-mgr.h> +#include <linux/gpio/consumer.h> +#include <linux/of.h> + +static int get_done_gpio(struct fpga_manager *mgr) +{ + struct xilinx_fpga_core *core = mgr->priv; + int ret; + + ret = gpiod_get_value(core->done); + if (ret < 0) + dev_err(&mgr->dev, "Error reading DONE (%d)\n", ret); + + return ret; +} + +static enum fpga_mgr_states xilinx_core_state(struct fpga_manager *mgr) +{ + if (!get_done_gpio(mgr)) + return FPGA_MGR_STATE_RESET; + + return FPGA_MGR_STATE_UNKNOWN; +} + +/** + * wait_for_init_b - wait for the INIT_B pin to have a given state, or wait + * a given delay if the pin is unavailable + * + * @mgr: The FPGA manager object + * @value: Value INIT_B to wait for (1 = asserted = low) + * @alt_udelay: Delay to wait if the INIT_B GPIO is not available + * + * Returns 0 when the INIT_B GPIO reached the given state or -ETIMEDOUT if + * too much time passed waiting for that. If no INIT_B GPIO is available + * then always return 0. + */ +static int wait_for_init_b(struct fpga_manager *mgr, int value, + unsigned long alt_udelay) +{ + struct xilinx_fpga_core *core = mgr->priv; + unsigned long timeout = jiffies + msecs_to_jiffies(1000); + + if (core->init_b) { + while (time_before(jiffies, timeout)) { + int ret = gpiod_get_value(core->init_b); + + if (ret == value) + return 0; + + if (ret < 0) { + dev_err(&mgr->dev, + "Error reading INIT_B (%d)\n", ret); + return ret; + } + + usleep_range(100, 400); + } + + dev_err(&mgr->dev, "Timeout waiting for INIT_B to %s\n", + value ? 
"assert" : "deassert"); + return -ETIMEDOUT; + } + + udelay(alt_udelay); + + return 0; +} + +static int xilinx_core_write_init(struct fpga_manager *mgr, + struct fpga_image_info *info, const char *buf, + size_t count) +{ + struct xilinx_fpga_core *core = mgr->priv; + int err; + + if (info->flags & FPGA_MGR_PARTIAL_RECONFIG) { + dev_err(&mgr->dev, "Partial reconfiguration not supported\n"); + return -EINVAL; + } + + gpiod_set_value(core->prog_b, 1); + + err = wait_for_init_b(mgr, 1, 1); /* min is 500 ns */ + if (err) { + gpiod_set_value(core->prog_b, 0); + return err; + } + + gpiod_set_value(core->prog_b, 0); + + err = wait_for_init_b(mgr, 0, 0); + if (err) + return err; + + if (get_done_gpio(mgr)) { + dev_err(&mgr->dev, "Unexpected DONE pin state...\n"); + return -EIO; + } + + /* program latency */ + usleep_range(7500, 7600); + return 0; +} + +static int xilinx_core_write(struct fpga_manager *mgr, const char *buf, + size_t count) +{ + struct xilinx_fpga_core *core = mgr->priv; + + return core->write(core, buf, count); +} + +static int xilinx_core_write_complete(struct fpga_manager *mgr, + struct fpga_image_info *info) +{ + struct xilinx_fpga_core *core = mgr->priv; + unsigned long timeout = + jiffies + usecs_to_jiffies(info->config_complete_timeout_us); + bool expired = false; + int done; + int ret; + const char padding[1] = { 0xff }; + + /* + * This loop is carefully written such that if the driver is + * scheduled out for more than 'timeout', we still check for DONE + * before giving up and we apply 8 extra CCLK cycles in all cases. + */ + while (!expired) { + expired = time_after(jiffies, timeout); + + done = get_done_gpio(mgr); + if (done < 0) + return done; + + ret = core->write(core, padding, sizeof(padding)); + if (ret) + return ret; + + if (done) + return 0; + } + + if (core->init_b) { + ret = gpiod_get_value(core->init_b); + + if (ret < 0) { + dev_err(&mgr->dev, "Error reading INIT_B (%d)\n", ret); + return ret; + } + + dev_err(&mgr->dev, + ret ? 
"CRC error or invalid device\n" : + "Missing sync word or incomplete bitstream\n"); + } else { + dev_err(&mgr->dev, "Timeout after config data transfer\n"); + } + + return -ETIMEDOUT; +} + +static inline struct gpio_desc * +xilinx_core_devm_gpiod_get(struct device *dev, const char *con_id, + const char *legacy_con_id, enum gpiod_flags flags) +{ + struct gpio_desc *desc; + + desc = devm_gpiod_get(dev, con_id, flags); + if (IS_ERR(desc) && PTR_ERR(desc) == -ENOENT && + of_device_is_compatible(dev->of_node, "xlnx,fpga-slave-serial")) + desc = devm_gpiod_get(dev, legacy_con_id, flags); + + return desc; +} + +static const struct fpga_manager_ops xilinx_core_ops = { + .state = xilinx_core_state, + .write_init = xilinx_core_write_init, + .write = xilinx_core_write, + .write_complete = xilinx_core_write_complete, +}; + +int xilinx_core_probe(struct xilinx_fpga_core *core) +{ + struct fpga_manager *mgr; + + if (!core || !core->dev || !core->write) + return -EINVAL; + + /* PROGRAM_B is active low */ + core->prog_b = xilinx_core_devm_gpiod_get(core->dev, "prog", "prog_b", + GPIOD_OUT_LOW); + if (IS_ERR(core->prog_b)) + return dev_err_probe(core->dev, PTR_ERR(core->prog_b), + "Failed to get PROGRAM_B gpio\n"); + + core->init_b = xilinx_core_devm_gpiod_get(core->dev, "init", "init-b", + GPIOD_IN); + if (IS_ERR(core->init_b)) + return dev_err_probe(core->dev, PTR_ERR(core->init_b), + "Failed to get INIT_B gpio\n"); + + core->done = devm_gpiod_get(core->dev, "done", GPIOD_IN); + if (IS_ERR(core->done)) + return dev_err_probe(core->dev, PTR_ERR(core->done), + "Failed to get DONE gpio\n"); + + mgr = devm_fpga_mgr_register(core->dev, + "Xilinx Slave Serial FPGA Manager", + &xilinx_core_ops, core); + return PTR_ERR_OR_ZERO(mgr); +} +EXPORT_SYMBOL_GPL(xilinx_core_probe); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Anatolij Gustschin <agust@denx.de>"); +MODULE_DESCRIPTION("Xilinx 7 Series FPGA manager core"); diff --git a/drivers/fpga/xilinx-core.h b/drivers/fpga/xilinx-core.h new file mode 100644 index 000000000000..f02ac67fce7b --- /dev/null +++ b/drivers/fpga/xilinx-core.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef __XILINX_CORE_H +#define __XILINX_CORE_H + +#include <linux/device.h> + +/** + * struct xilinx_fpga_core - interface between the driver and the core manager + * of Xilinx 7 Series FPGA manager + * @dev: device node + * @write: write callback of the driver + */ +struct xilinx_fpga_core { +/* public: */ + struct device *dev; + int (*write)(struct xilinx_fpga_core *core, const char *buf, + size_t count); +/* private: handled by xilinx-core */ + struct gpio_desc *prog_b; + struct gpio_desc *init_b; + struct gpio_desc *done; +}; + +int xilinx_core_probe(struct xilinx_fpga_core *core); + +#endif /* __XILINX_CORE_H */ diff --git a/drivers/fpga/xilinx-pr-decoupler.c b/drivers/fpga/xilinx-pr-decoupler.c index ea2bde6e5bc4..822751fad18a 100644 --- a/drivers/fpga/xilinx-pr-decoupler.c +++ b/drivers/fpga/xilinx-pr-decoupler.c @@ -10,8 +10,10 @@ #include <linux/clk.h> #include <linux/io.h> #include <linux/kernel.h> -#include <linux/of_device.h> #include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/property.h> #include <linux/fpga/fpga-bridge.h> #define CTRL_CMD_DECOUPLE BIT(0) @@ -69,7 +71,7 @@ static int xlnx_pr_decoupler_enable_show(struct fpga_bridge *bridge) if (err) return err; - status = readl(priv->io_base); + status = xlnx_pr_decouple_read(priv, CTRL_OFFSET); clk_disable(priv->clk); @@ -102,26 +104,17 @@ 
MODULE_DEVICE_TABLE(of, xlnx_pr_decoupler_of_match); static int xlnx_pr_decoupler_probe(struct platform_device *pdev) { - struct device_node *np = pdev->dev.of_node; struct xlnx_pr_decoupler_data *priv; struct fpga_bridge *br; int err; - struct resource *res; priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; - if (np) { - const struct of_device_id *match; + priv->ipconfig = device_get_match_data(&pdev->dev); - match = of_match_node(xlnx_pr_decoupler_of_match, np); - if (match && match->data) - priv->ipconfig = match->data; - } - - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - priv->io_base = devm_ioremap_resource(&pdev->dev, res); + priv->io_base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(priv->io_base)) return PTR_ERR(priv->io_base); @@ -138,22 +131,17 @@ static int xlnx_pr_decoupler_probe(struct platform_device *pdev) clk_disable(priv->clk); - br = devm_fpga_bridge_create(&pdev->dev, priv->ipconfig->name, - &xlnx_pr_decoupler_br_ops, priv); - if (!br) { - err = -ENOMEM; - goto err_clk; - } - - platform_set_drvdata(pdev, br); - - err = fpga_bridge_register(br); - if (err) { + br = fpga_bridge_register(&pdev->dev, priv->ipconfig->name, + &xlnx_pr_decoupler_br_ops, priv); + if (IS_ERR(br)) { + err = PTR_ERR(br); dev_err(&pdev->dev, "unable to register %s", priv->ipconfig->name); goto err_clk; } + platform_set_drvdata(pdev, br); + return 0; err_clk: @@ -162,7 +150,7 @@ err_clk: return err; } -static int xlnx_pr_decoupler_remove(struct platform_device *pdev) +static void xlnx_pr_decoupler_remove(struct platform_device *pdev) { struct fpga_bridge *bridge = platform_get_drvdata(pdev); struct xlnx_pr_decoupler_data *p = bridge->priv; @@ -170,8 +158,6 @@ static int xlnx_pr_decoupler_remove(struct platform_device *pdev) fpga_bridge_unregister(bridge); clk_unprepare(p->clk); - - return 0; } static struct platform_driver xlnx_pr_decoupler_driver = { @@ -179,7 +165,7 @@ static struct platform_driver xlnx_pr_decoupler_driver = { .remove = xlnx_pr_decoupler_remove, .driver = { .name = "xlnx_pr_decoupler", - .of_match_table = of_match_ptr(xlnx_pr_decoupler_of_match), + .of_match_table = xlnx_pr_decoupler_of_match, }, }; diff --git a/drivers/fpga/xilinx-selectmap.c b/drivers/fpga/xilinx-selectmap.c new file mode 100644 index 000000000000..2cd87e7e913f --- /dev/null +++ b/drivers/fpga/xilinx-selectmap.c @@ -0,0 +1,95 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Xilinx Spartan6 and 7 Series SelectMAP interface driver + * + * (C) 2024 Charles Perry <charles.perry@savoirfairelinux.com> + * + * Manage Xilinx FPGA firmware loaded over the SelectMAP configuration + * interface. 
+ */ + +#include "xilinx-core.h" + +#include <linux/gpio/consumer.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/mod_devicetable.h> +#include <linux/of.h> +#include <linux/platform_device.h> + +struct xilinx_selectmap_conf { + struct xilinx_fpga_core core; + void __iomem *base; +}; + +#define to_xilinx_selectmap_conf(obj) \ + container_of(obj, struct xilinx_selectmap_conf, core) + +static int xilinx_selectmap_write(struct xilinx_fpga_core *core, + const char *buf, size_t count) +{ + struct xilinx_selectmap_conf *conf = to_xilinx_selectmap_conf(core); + size_t i; + + for (i = 0; i < count; ++i) + writeb(buf[i], conf->base); + + return 0; +} + +static int xilinx_selectmap_probe(struct platform_device *pdev) +{ + struct xilinx_selectmap_conf *conf; + struct gpio_desc *gpio; + void __iomem *base; + + conf = devm_kzalloc(&pdev->dev, sizeof(*conf), GFP_KERNEL); + if (!conf) + return -ENOMEM; + + conf->core.dev = &pdev->dev; + conf->core.write = xilinx_selectmap_write; + + base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL); + if (IS_ERR(base)) + return dev_err_probe(&pdev->dev, PTR_ERR(base), + "ioremap error\n"); + conf->base = base; + + /* CSI_B is active low */ + gpio = devm_gpiod_get_optional(&pdev->dev, "csi", GPIOD_OUT_HIGH); + if (IS_ERR(gpio)) + return dev_err_probe(&pdev->dev, PTR_ERR(gpio), + "Failed to get CSI_B gpio\n"); + + /* RDWR_B is active low */ + gpio = devm_gpiod_get_optional(&pdev->dev, "rdwr", GPIOD_OUT_HIGH); + if (IS_ERR(gpio)) + return dev_err_probe(&pdev->dev, PTR_ERR(gpio), + "Failed to get RDWR_B gpio\n"); + + return xilinx_core_probe(&conf->core); +} + +static const struct of_device_id xlnx_selectmap_of_match[] = { + { .compatible = "xlnx,fpga-xc7s-selectmap", }, // Spartan-7 + { .compatible = "xlnx,fpga-xc7a-selectmap", }, // Artix-7 + { .compatible = "xlnx,fpga-xc7k-selectmap", }, // Kintex-7 + { .compatible = "xlnx,fpga-xc7v-selectmap", }, // Virtex-7 + {}, +}; +MODULE_DEVICE_TABLE(of, xlnx_selectmap_of_match); + +static struct platform_driver xilinx_selectmap_driver = { + .driver = { + .name = "xilinx-selectmap", + .of_match_table = xlnx_selectmap_of_match, + }, + .probe = xilinx_selectmap_probe, +}; + +module_platform_driver(xilinx_selectmap_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Charles Perry <charles.perry@savoirfairelinux.com>"); +MODULE_DESCRIPTION("Load Xilinx FPGA firmware over SelectMap"); diff --git a/drivers/fpga/xilinx-spi.c b/drivers/fpga/xilinx-spi.c index fee4d0abf6bf..e294e3a6cc03 100644 --- a/drivers/fpga/xilinx-spi.c +++ b/drivers/fpga/xilinx-spi.c @@ -10,127 +10,17 @@ * the slave serial configuration interface. 
*/ -#include <linux/delay.h> -#include <linux/device.h> -#include <linux/fpga/fpga-mgr.h> -#include <linux/gpio/consumer.h> +#include "xilinx-core.h" + #include <linux/module.h> #include <linux/mod_devicetable.h> #include <linux/of.h> #include <linux/spi/spi.h> -#include <linux/sizes.h> - -struct xilinx_spi_conf { - struct spi_device *spi; - struct gpio_desc *prog_b; - struct gpio_desc *init_b; - struct gpio_desc *done; -}; - -static int get_done_gpio(struct fpga_manager *mgr) -{ - struct xilinx_spi_conf *conf = mgr->priv; - int ret; - - ret = gpiod_get_value(conf->done); - - if (ret < 0) - dev_err(&mgr->dev, "Error reading DONE (%d)\n", ret); - - return ret; -} - -static enum fpga_mgr_states xilinx_spi_state(struct fpga_manager *mgr) -{ - if (!get_done_gpio(mgr)) - return FPGA_MGR_STATE_RESET; - - return FPGA_MGR_STATE_UNKNOWN; -} - -/** - * wait_for_init_b - wait for the INIT_B pin to have a given state, or wait - * a given delay if the pin is unavailable - * - * @mgr: The FPGA manager object - * @value: Value INIT_B to wait for (1 = asserted = low) - * @alt_udelay: Delay to wait if the INIT_B GPIO is not available - * - * Returns 0 when the INIT_B GPIO reached the given state or -ETIMEDOUT if - * too much time passed waiting for that. If no INIT_B GPIO is available - * then always return 0. - */ -static int wait_for_init_b(struct fpga_manager *mgr, int value, - unsigned long alt_udelay) -{ - struct xilinx_spi_conf *conf = mgr->priv; - unsigned long timeout = jiffies + msecs_to_jiffies(1000); - - if (conf->init_b) { - while (time_before(jiffies, timeout)) { - int ret = gpiod_get_value(conf->init_b); - - if (ret == value) - return 0; - - if (ret < 0) { - dev_err(&mgr->dev, "Error reading INIT_B (%d)\n", ret); - return ret; - } - - usleep_range(100, 400); - } - - dev_err(&mgr->dev, "Timeout waiting for INIT_B to %s\n", - value ? 
"assert" : "deassert"); - return -ETIMEDOUT; - } - - udelay(alt_udelay); - - return 0; -} - -static int xilinx_spi_write_init(struct fpga_manager *mgr, - struct fpga_image_info *info, - const char *buf, size_t count) -{ - struct xilinx_spi_conf *conf = mgr->priv; - int err; - - if (info->flags & FPGA_MGR_PARTIAL_RECONFIG) { - dev_err(&mgr->dev, "Partial reconfiguration not supported\n"); - return -EINVAL; - } - - gpiod_set_value(conf->prog_b, 1); - - err = wait_for_init_b(mgr, 1, 1); /* min is 500 ns */ - if (err) { - gpiod_set_value(conf->prog_b, 0); - return err; - } - - gpiod_set_value(conf->prog_b, 0); - err = wait_for_init_b(mgr, 0, 0); - if (err) - return err; - - if (get_done_gpio(mgr)) { - dev_err(&mgr->dev, "Unexpected DONE pin state...\n"); - return -EIO; - } - - /* program latency */ - usleep_range(7500, 7600); - return 0; -} - -static int xilinx_spi_write(struct fpga_manager *mgr, const char *buf, +static int xilinx_spi_write(struct xilinx_fpga_core *core, const char *buf, size_t count) { - struct xilinx_spi_conf *conf = mgr->priv; + struct spi_device *spi = to_spi_device(core->dev); const char *fw_data = buf; const char *fw_data_end = fw_data + count; @@ -141,9 +31,9 @@ static int xilinx_spi_write(struct fpga_manager *mgr, const char *buf, remaining = fw_data_end - fw_data; stride = min_t(size_t, remaining, SZ_4K); - ret = spi_write(conf->spi, fw_data, stride); + ret = spi_write(spi, fw_data, stride); if (ret) { - dev_err(&mgr->dev, "SPI error in firmware write: %d\n", + dev_err(core->dev, "SPI error in firmware write: %d\n", ret); return ret; } @@ -153,114 +43,35 @@ static int xilinx_spi_write(struct fpga_manager *mgr, const char *buf, return 0; } -static int xilinx_spi_apply_cclk_cycles(struct xilinx_spi_conf *conf) -{ - struct spi_device *spi = conf->spi; - const u8 din_data[1] = { 0xff }; - int ret; - - ret = spi_write(conf->spi, din_data, sizeof(din_data)); - if (ret) - dev_err(&spi->dev, "applying CCLK cycles failed: %d\n", ret); - - return ret; -} - -static int xilinx_spi_write_complete(struct fpga_manager *mgr, - struct fpga_image_info *info) -{ - struct xilinx_spi_conf *conf = mgr->priv; - unsigned long timeout = jiffies + usecs_to_jiffies(info->config_complete_timeout_us); - bool expired = false; - int done; - int ret; - - /* - * This loop is carefully written such that if the driver is - * scheduled out for more than 'timeout', we still check for DONE - * before giving up and we apply 8 extra CCLK cycles in all cases. - */ - while (!expired) { - expired = time_after(jiffies, timeout); - - done = get_done_gpio(mgr); - if (done < 0) - return done; - - ret = xilinx_spi_apply_cclk_cycles(conf); - if (ret) - return ret; - - if (done) - return 0; - } - - if (conf->init_b) { - ret = gpiod_get_value(conf->init_b); - - if (ret < 0) { - dev_err(&mgr->dev, "Error reading INIT_B (%d)\n", ret); - return ret; - } - - dev_err(&mgr->dev, - ret ? 
"CRC error or invalid device\n" - : "Missing sync word or incomplete bitstream\n"); - } else { - dev_err(&mgr->dev, "Timeout after config data transfer\n"); - } - - return -ETIMEDOUT; -} - -static const struct fpga_manager_ops xilinx_spi_ops = { - .state = xilinx_spi_state, - .write_init = xilinx_spi_write_init, - .write = xilinx_spi_write, - .write_complete = xilinx_spi_write_complete, -}; - static int xilinx_spi_probe(struct spi_device *spi) { - struct xilinx_spi_conf *conf; - struct fpga_manager *mgr; + struct xilinx_fpga_core *core; - conf = devm_kzalloc(&spi->dev, sizeof(*conf), GFP_KERNEL); - if (!conf) + core = devm_kzalloc(&spi->dev, sizeof(*core), GFP_KERNEL); + if (!core) return -ENOMEM; - conf->spi = spi; - - /* PROGRAM_B is active low */ - conf->prog_b = devm_gpiod_get(&spi->dev, "prog_b", GPIOD_OUT_LOW); - if (IS_ERR(conf->prog_b)) - return dev_err_probe(&spi->dev, PTR_ERR(conf->prog_b), - "Failed to get PROGRAM_B gpio\n"); - - conf->init_b = devm_gpiod_get_optional(&spi->dev, "init-b", GPIOD_IN); - if (IS_ERR(conf->init_b)) - return dev_err_probe(&spi->dev, PTR_ERR(conf->init_b), - "Failed to get INIT_B gpio\n"); + core->dev = &spi->dev; + core->write = xilinx_spi_write; - conf->done = devm_gpiod_get(&spi->dev, "done", GPIOD_IN); - if (IS_ERR(conf->done)) - return dev_err_probe(&spi->dev, PTR_ERR(conf->done), - "Failed to get DONE gpio\n"); - - mgr = devm_fpga_mgr_create(&spi->dev, - "Xilinx Slave Serial FPGA Manager", - &xilinx_spi_ops, conf); - if (!mgr) - return -ENOMEM; - - return devm_fpga_mgr_register(&spi->dev, mgr); + return xilinx_core_probe(core); } +static const struct spi_device_id xilinx_spi_ids[] = { + { "fpga-slave-serial" }, + { }, +}; +MODULE_DEVICE_TABLE(spi, xilinx_spi_ids); + +#ifdef CONFIG_OF static const struct of_device_id xlnx_spi_of_match[] = { - { .compatible = "xlnx,fpga-slave-serial", }, + { + .compatible = "xlnx,fpga-slave-serial", + }, {} }; MODULE_DEVICE_TABLE(of, xlnx_spi_of_match); +#endif static struct spi_driver xilinx_slave_spi_driver = { .driver = { @@ -268,6 +79,7 @@ static struct spi_driver xilinx_slave_spi_driver = { .of_match_table = of_match_ptr(xlnx_spi_of_match), }, .probe = xilinx_spi_probe, + .id_table = xilinx_spi_ids, }; module_spi_driver(xilinx_slave_spi_driver) diff --git a/drivers/fpga/zynq-fpga.c b/drivers/fpga/zynq-fpga.c index 07fa8d9ec675..b7629a0e4813 100644 --- a/drivers/fpga/zynq-fpga.c +++ b/drivers/fpga/zynq-fpga.c @@ -192,7 +192,7 @@ static void zynq_step_dma(struct zynq_fpga_priv *priv) /* Once the first transfer is queued we can turn on the ISR, future * calls to zynq_step_dma will happen from the ISR context. The - * dma_lock spinlock guarentees this handover is done coherently, the + * dma_lock spinlock guarantees this handover is done coherently, the * ISR enable is put at the end to avoid another CPU spinning in the * ISR on this lock. 
*/ @@ -267,7 +267,7 @@ static int zynq_fpga_ops_write_init(struct fpga_manager *mgr, ctrl = zynq_fpga_read(priv, CTRL_OFFSET); if (!(ctrl & CTRL_SEC_EN_MASK)) { dev_err(&mgr->dev, - "System not secure, can't use crypted bitstreams\n"); + "System not secure, can't use encrypted bitstreams\n"); err = -EINVAL; goto out_err; } @@ -344,7 +344,7 @@ static int zynq_fpga_ops_write_init(struct fpga_manager *mgr, /* set configuration register with following options: * - enable PCAP interface - * - set throughput for maximum speed (if bistream not crypted) + * - set throughput for maximum speed (if bistream not encrypted) * - set CPU in user mode */ ctrl = zynq_fpga_read(priv, CTRL_OFFSET); @@ -387,7 +387,7 @@ static int zynq_fpga_ops_write(struct fpga_manager *mgr, struct sg_table *sgt) const char *why; int err; u32 intr_status; - unsigned long timeout; + unsigned long time_left; unsigned long flags; struct scatterlist *sg; int i; @@ -405,12 +405,12 @@ static int zynq_fpga_ops_write(struct fpga_manager *mgr, struct sg_table *sgt) } } - priv->dma_nelms = - dma_map_sg(mgr->dev.parent, sgt->sgl, sgt->nents, DMA_TO_DEVICE); - if (priv->dma_nelms == 0) { + err = dma_map_sgtable(mgr->dev.parent, sgt, DMA_TO_DEVICE, 0); + if (err) { dev_err(&mgr->dev, "Unable to DMA map (TO_DEVICE)\n"); - return -ENOMEM; + return err; } + priv->dma_nelms = sgt->nents; /* enable clock */ err = clk_enable(priv->clk); @@ -427,8 +427,8 @@ static int zynq_fpga_ops_write(struct fpga_manager *mgr, struct sg_table *sgt) zynq_step_dma(priv); spin_unlock_irqrestore(&priv->dma_lock, flags); - timeout = wait_for_completion_timeout(&priv->dma_done, - msecs_to_jiffies(DMA_TIMEOUT_MS)); + time_left = wait_for_completion_timeout(&priv->dma_done, + msecs_to_jiffies(DMA_TIMEOUT_MS)); spin_lock_irqsave(&priv->dma_lock, flags); zynq_fpga_set_irq(priv, 0); @@ -452,7 +452,7 @@ static int zynq_fpga_ops_write(struct fpga_manager *mgr, struct sg_table *sgt) if (priv->cur_sg || !((intr_status & IXR_D_P_DONE_MASK) == IXR_D_P_DONE_MASK)) { - if (timeout == 0) + if (time_left == 0) why = "DMA timed out"; else why = "DMA did not complete"; @@ -478,7 +478,7 @@ out_clk: clk_disable(priv->clk); out_free: - dma_unmap_sg(mgr->dev.parent, sgt->sgl, sgt->nents, DMA_TO_DEVICE); + dma_unmap_sgtable(mgr->dev.parent, sgt, DMA_TO_DEVICE, 0); return err; } @@ -493,15 +493,15 @@ static int zynq_fpga_ops_write_complete(struct fpga_manager *mgr, if (err) return err; - /* Release 'PR' control back to the ICAP */ - zynq_fpga_write(priv, CTRL_OFFSET, - zynq_fpga_read(priv, CTRL_OFFSET) & ~CTRL_PCAP_PR_MASK); - err = zynq_fpga_poll_timeout(priv, INT_STS_OFFSET, intr_status, intr_status & IXR_PCFG_DONE_MASK, INIT_POLL_DELAY, INIT_POLL_TIMEOUT); + /* Release 'PR' control back to the ICAP */ + zynq_fpga_write(priv, CTRL_OFFSET, + zynq_fpga_read(priv, CTRL_OFFSET) & ~CTRL_PCAP_PR_MASK); + clk_disable(priv->clk); if (err) @@ -555,7 +555,6 @@ static int zynq_fpga_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct zynq_fpga_priv *priv; struct fpga_manager *mgr; - struct resource *res; int err; priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); @@ -563,8 +562,7 @@ static int zynq_fpga_probe(struct platform_device *pdev) return -ENOMEM; spin_lock_init(&priv->dma_lock); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - priv->io_base = devm_ioremap_resource(dev, res); + priv->io_base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(priv->io_base)) return PTR_ERR(priv->io_base); @@ -582,11 +580,9 @@ static int zynq_fpga_probe(struct 
platform_device *pdev) return priv->irq; priv->clk = devm_clk_get(dev, "ref_clk"); - if (IS_ERR(priv->clk)) { - if (PTR_ERR(priv->clk) != -EPROBE_DEFER) - dev_err(dev, "input clock not found\n"); - return PTR_ERR(priv->clk); - } + if (IS_ERR(priv->clk)) + return dev_err_probe(dev, PTR_ERR(priv->clk), + "input clock not found\n"); err = clk_prepare_enable(priv->clk); if (err) { @@ -609,24 +605,20 @@ static int zynq_fpga_probe(struct platform_device *pdev) clk_disable(priv->clk); - mgr = devm_fpga_mgr_create(dev, "Xilinx Zynq FPGA Manager", - &zynq_fpga_ops, priv); - if (!mgr) - return -ENOMEM; - - platform_set_drvdata(pdev, mgr); - - err = fpga_mgr_register(mgr); - if (err) { + mgr = fpga_mgr_register(dev, "Xilinx Zynq FPGA Manager", + &zynq_fpga_ops, priv); + if (IS_ERR(mgr)) { dev_err(dev, "unable to register FPGA manager\n"); clk_unprepare(priv->clk); - return err; + return PTR_ERR(mgr); } + platform_set_drvdata(pdev, mgr); + return 0; } -static int zynq_fpga_remove(struct platform_device *pdev) +static void zynq_fpga_remove(struct platform_device *pdev) { struct zynq_fpga_priv *priv; struct fpga_manager *mgr; @@ -637,8 +629,6 @@ static int zynq_fpga_remove(struct platform_device *pdev) fpga_mgr_unregister(mgr); clk_unprepare(priv->clk); - - return 0; } #ifdef CONFIG_OF diff --git a/drivers/fpga/zynqmp-fpga.c b/drivers/fpga/zynqmp-fpga.c index 125743c9797f..f3434e2c487b 100644 --- a/drivers/fpga/zynqmp-fpga.c +++ b/drivers/fpga/zynqmp-fpga.c @@ -66,12 +66,6 @@ static int zynqmp_fpga_ops_write(struct fpga_manager *mgr, return ret; } -static int zynqmp_fpga_ops_write_complete(struct fpga_manager *mgr, - struct fpga_image_info *info) -{ - return 0; -} - static enum fpga_mgr_states zynqmp_fpga_ops_state(struct fpga_manager *mgr) { u32 status = 0; @@ -83,11 +77,30 @@ static enum fpga_mgr_states zynqmp_fpga_ops_state(struct fpga_manager *mgr) return FPGA_MGR_STATE_UNKNOWN; } +static ssize_t status_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + u32 status; + int ret; + + ret = zynqmp_pm_fpga_get_config_status(&status); + if (ret) + return ret; + + return sysfs_emit(buf, "0x%x\n", status); +} +static DEVICE_ATTR_RO(status); + +static struct attribute *zynqmp_fpga_attrs[] = { + &dev_attr_status.attr, + NULL, +}; +ATTRIBUTE_GROUPS(zynqmp_fpga); + static const struct fpga_manager_ops zynqmp_fpga_ops = { .state = zynqmp_fpga_ops_state, .write_init = zynqmp_fpga_ops_write_init, .write = zynqmp_fpga_ops_write, - .write_complete = zynqmp_fpga_ops_write_complete, }; static int zynqmp_fpga_probe(struct platform_device *pdev) @@ -102,26 +115,25 @@ static int zynqmp_fpga_probe(struct platform_device *pdev) priv->dev = dev; - mgr = devm_fpga_mgr_create(dev, "Xilinx ZynqMP FPGA Manager", - &zynqmp_fpga_ops, priv); - if (!mgr) - return -ENOMEM; - - return devm_fpga_mgr_register(dev, mgr); + mgr = devm_fpga_mgr_register(dev, "Xilinx ZynqMP FPGA Manager", + &zynqmp_fpga_ops, priv); + return PTR_ERR_OR_ZERO(mgr); } +#ifdef CONFIG_OF static const struct of_device_id zynqmp_fpga_of_match[] = { { .compatible = "xlnx,zynqmp-pcap-fpga", }, {}, }; - MODULE_DEVICE_TABLE(of, zynqmp_fpga_of_match); +#endif static struct platform_driver zynqmp_fpga_driver = { .probe = zynqmp_fpga_probe, .driver = { .name = "zynqmp_fpga_manager", .of_match_table = of_match_ptr(zynqmp_fpga_of_match), + .dev_groups = zynqmp_fpga_groups, }, }; |
