Diffstat (limited to 'drivers/pci/endpoint/functions')
-rw-r--r--  drivers/pci/endpoint/functions/pci-epf-mhi.c  |  56
-rw-r--r--  drivers/pci/endpoint/functions/pci-epf-test.c | 664
-rw-r--r--  drivers/pci/endpoint/functions/pci-epf-vntb.c |  45
3 files changed, 418 insertions(+), 347 deletions(-)
diff --git a/drivers/pci/endpoint/functions/pci-epf-mhi.c b/drivers/pci/endpoint/functions/pci-epf-mhi.c
index 2c54d80107cf..6643a88c7a0c 100644
--- a/drivers/pci/endpoint/functions/pci-epf-mhi.c
+++ b/drivers/pci/endpoint/functions/pci-epf-mhi.c
@@ -125,7 +125,7 @@ static const struct pci_epf_mhi_ep_info sm8450_info = {
static struct pci_epf_header sa8775p_header = {
.vendorid = PCI_VENDOR_ID_QCOM,
- .deviceid = 0x0306, /* FIXME: Update deviceid for sa8775p EP */
+ .deviceid = 0x0116,
.baseclass_code = PCI_CLASS_OTHERS,
.interrupt_pin = PCI_INTERRUPT_INTA,
};
@@ -137,6 +137,7 @@ static const struct pci_epf_mhi_ep_info sa8775p_info = {
.epf_flags = PCI_BASE_ADDRESS_MEM_TYPE_32,
.msi_count = 32,
.mru = 0x8000,
+ .flags = MHI_EPF_USE_DMA,
};
struct pci_epf_mhi {
@@ -716,7 +717,7 @@ static void pci_epf_mhi_dma_deinit(struct pci_epf_mhi *epf_mhi)
epf_mhi->dma_chan_rx = NULL;
}
-static int pci_epf_mhi_core_init(struct pci_epf *epf)
+static int pci_epf_mhi_epc_init(struct pci_epf *epf)
{
struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
const struct pci_epf_mhi_ep_info *info = epf_mhi->info;
@@ -753,9 +754,35 @@ static int pci_epf_mhi_core_init(struct pci_epf *epf)
if (!epf_mhi->epc_features)
return -ENODATA;
+ if (info->flags & MHI_EPF_USE_DMA) {
+ ret = pci_epf_mhi_dma_init(epf_mhi);
+ if (ret) {
+ dev_err(dev, "Failed to initialize DMA: %d\n", ret);
+ return ret;
+ }
+ }
+
return 0;
}
+static void pci_epf_mhi_epc_deinit(struct pci_epf *epf)
+{
+ struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
+ const struct pci_epf_mhi_ep_info *info = epf_mhi->info;
+ struct pci_epf_bar *epf_bar = &epf->bar[info->bar_num];
+ struct mhi_ep_cntrl *mhi_cntrl = &epf_mhi->mhi_cntrl;
+ struct pci_epc *epc = epf->epc;
+
+ if (mhi_cntrl->mhi_dev) {
+ mhi_ep_power_down(mhi_cntrl);
+ if (info->flags & MHI_EPF_USE_DMA)
+ pci_epf_mhi_dma_deinit(epf_mhi);
+ mhi_ep_unregister_controller(mhi_cntrl);
+ }
+
+ pci_epc_clear_bar(epc, epf->func_no, epf->vfunc_no, epf_bar);
+}
+
static int pci_epf_mhi_link_up(struct pci_epf *epf)
{
struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
@@ -765,14 +792,6 @@ static int pci_epf_mhi_link_up(struct pci_epf *epf)
struct device *dev = &epf->dev;
int ret;
- if (info->flags & MHI_EPF_USE_DMA) {
- ret = pci_epf_mhi_dma_init(epf_mhi);
- if (ret) {
- dev_err(dev, "Failed to initialize DMA: %d\n", ret);
- return ret;
- }
- }
-
mhi_cntrl->mmio = epf_mhi->mmio;
mhi_cntrl->irq = epf_mhi->irq;
mhi_cntrl->mru = info->mru;
@@ -819,7 +838,7 @@ static int pci_epf_mhi_link_down(struct pci_epf *epf)
return 0;
}
-static int pci_epf_mhi_bme(struct pci_epf *epf)
+static int pci_epf_mhi_bus_master_enable(struct pci_epf *epf)
{
struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
const struct pci_epf_mhi_ep_info *info = epf_mhi->info;
@@ -848,12 +867,18 @@ static int pci_epf_mhi_bind(struct pci_epf *epf)
{
struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
struct pci_epc *epc = epf->epc;
+ struct device *dev = &epf->dev;
struct platform_device *pdev = to_platform_device(epc->dev.parent);
struct resource *res;
int ret;
/* Get MMIO base address from Endpoint controller */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mmio");
+ if (!res) {
+ dev_err(dev, "Failed to get \"mmio\" resource\n");
+ return -ENODEV;
+ }
+
epf_mhi->mmio_phys = res->start;
epf_mhi->mmio_size = resource_size(res);
@@ -882,8 +907,8 @@ static void pci_epf_mhi_unbind(struct pci_epf *epf)
/*
* Forcefully power down the MHI EP stack. Only way to bring the MHI EP
- * stack back to working state after successive bind is by getting BME
- * from host.
+ * stack back to working state after successive bind is by getting Bus
+ * Master Enable event from host.
*/
if (mhi_cntrl->mhi_dev) {
mhi_ep_power_down(mhi_cntrl);
@@ -897,10 +922,11 @@ static void pci_epf_mhi_unbind(struct pci_epf *epf)
}
static const struct pci_epc_event_ops pci_epf_mhi_event_ops = {
- .core_init = pci_epf_mhi_core_init,
+ .epc_init = pci_epf_mhi_epc_init,
+ .epc_deinit = pci_epf_mhi_epc_deinit,
.link_up = pci_epf_mhi_link_up,
.link_down = pci_epf_mhi_link_down,
- .bme = pci_epf_mhi_bme,
+ .bus_master_enable = pci_epf_mhi_bus_master_enable,
};
static int pci_epf_mhi_probe(struct pci_epf *epf,
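
For orientation, the sketch below condenses the callback set a function driver registers after this rename, assuming only the pci_epc_event_ops members exactly as they appear in the hunks above (core_init -> epc_init, bme -> bus_master_enable, plus the epc_deinit teardown hook). The my_epf_* names are illustrative placeholders, not part of the patch.

#include <linux/pci-epc.h>
#include <linux/pci-epf.h>

/* Hypothetical skeleton: shows when each renamed callback fires. */
static int my_epf_epc_init(struct pci_epf *epf)
{
	/* EPC core is ready: write the config header, set BARs and, as this
	 * patch does for MHI, bring up per-bind resources such as eDMA. */
	return 0;
}

static void my_epf_epc_deinit(struct pci_epf *epf)
{
	/* Teardown counterpart of epc_init: undo whatever epc_init set up. */
}

static int my_epf_bus_master_enable(struct pci_epf *epf)
{
	/* Host set the Bus Master Enable bit; safe to start DMA traffic. */
	return 0;
}

static const struct pci_epc_event_ops my_epf_event_ops = {
	.epc_init          = my_epf_epc_init,		/* was .core_init */
	.epc_deinit        = my_epf_epc_deinit,		/* teardown hook */
	.link_up           = NULL,			/* optional, unchanged */
	.link_down         = NULL,			/* optional, unchanged */
	.bus_master_enable = my_epf_bus_master_enable,	/* was .bme */
};
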
diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
index cd4ffb39dcdc..50eb4106369f 100644
--- a/drivers/pci/endpoint/functions/pci-epf-test.c
+++ b/drivers/pci/endpoint/functions/pci-epf-test.c
@@ -44,6 +44,11 @@
#define TIMER_RESOLUTION 1
+#define CAP_UNALIGNED_ACCESS BIT(0)
+#define CAP_MSI BIT(1)
+#define CAP_MSIX BIT(2)
+#define CAP_INTX BIT(3)
+
static struct workqueue_struct *kpcitest_workqueue;
struct pci_epf_test {
@@ -64,16 +69,17 @@ struct pci_epf_test {
};
struct pci_epf_test_reg {
- u32 magic;
- u32 command;
- u32 status;
- u64 src_addr;
- u64 dst_addr;
- u32 size;
- u32 checksum;
- u32 irq_type;
- u32 irq_number;
- u32 flags;
+ __le32 magic;
+ __le32 command;
+ __le32 status;
+ __le64 src_addr;
+ __le64 dst_addr;
+ __le32 size;
+ __le32 checksum;
+ __le32 irq_type;
+ __le32 irq_number;
+ __le32 flags;
+ __le32 caps;
} __packed;
static struct pci_epf_header test_header = {
@@ -251,7 +257,7 @@ static int pci_epf_test_init_dma_chan(struct pci_epf_test *epf_test)
fail_back_rx:
dma_release_channel(epf_test->dma_chan_rx);
- epf_test->dma_chan_tx = NULL;
+ epf_test->dma_chan_rx = NULL;
fail_back_tx:
dma_cap_zero(mask);
@@ -291,8 +297,6 @@ static void pci_epf_test_clean_dma_chan(struct pci_epf_test *epf_test)
dma_release_channel(epf_test->dma_chan_rx);
epf_test->dma_chan_rx = NULL;
-
- return;
}
static void pci_epf_test_print_rate(struct pci_epf_test *epf_test,
@@ -317,250 +321,281 @@ static void pci_epf_test_print_rate(struct pci_epf_test *epf_test,
static void pci_epf_test_copy(struct pci_epf_test *epf_test,
struct pci_epf_test_reg *reg)
{
- int ret;
- void __iomem *src_addr;
- void __iomem *dst_addr;
- phys_addr_t src_phys_addr;
- phys_addr_t dst_phys_addr;
+ int ret = 0;
struct timespec64 start, end;
struct pci_epf *epf = epf_test->epf;
- struct device *dev = &epf->dev;
struct pci_epc *epc = epf->epc;
-
- src_addr = pci_epc_mem_alloc_addr(epc, &src_phys_addr, reg->size);
- if (!src_addr) {
- dev_err(dev, "Failed to allocate source address\n");
- reg->status = STATUS_SRC_ADDR_INVALID;
- ret = -ENOMEM;
- goto err;
- }
-
- ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, src_phys_addr,
- reg->src_addr, reg->size);
- if (ret) {
- dev_err(dev, "Failed to map source address\n");
- reg->status = STATUS_SRC_ADDR_INVALID;
- goto err_src_addr;
- }
-
- dst_addr = pci_epc_mem_alloc_addr(epc, &dst_phys_addr, reg->size);
- if (!dst_addr) {
- dev_err(dev, "Failed to allocate destination address\n");
- reg->status = STATUS_DST_ADDR_INVALID;
- ret = -ENOMEM;
- goto err_src_map_addr;
+ struct device *dev = &epf->dev;
+ struct pci_epc_map src_map, dst_map;
+ u64 src_addr = le64_to_cpu(reg->src_addr);
+ u64 dst_addr = le64_to_cpu(reg->dst_addr);
+ size_t orig_size, copy_size;
+ ssize_t map_size = 0;
+ u32 flags = le32_to_cpu(reg->flags);
+ u32 status = 0;
+ void *copy_buf = NULL, *buf;
+
+ orig_size = copy_size = le32_to_cpu(reg->size);
+
+ if (flags & FLAG_USE_DMA) {
+ if (!dma_has_cap(DMA_MEMCPY, epf_test->dma_chan_tx->device->cap_mask)) {
+ dev_err(dev, "DMA controller doesn't support MEMCPY\n");
+ ret = -EINVAL;
+ goto set_status;
+ }
+ } else {
+ copy_buf = kzalloc(copy_size, GFP_KERNEL);
+ if (!copy_buf) {
+ ret = -ENOMEM;
+ goto set_status;
+ }
+ buf = copy_buf;
}
- ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, dst_phys_addr,
- reg->dst_addr, reg->size);
- if (ret) {
- dev_err(dev, "Failed to map destination address\n");
- reg->status = STATUS_DST_ADDR_INVALID;
- goto err_dst_addr;
- }
+ while (copy_size) {
+ ret = pci_epc_mem_map(epc, epf->func_no, epf->vfunc_no,
+ src_addr, copy_size, &src_map);
+ if (ret) {
+ dev_err(dev, "Failed to map source address\n");
+ status = STATUS_SRC_ADDR_INVALID;
+ goto free_buf;
+ }
- ktime_get_ts64(&start);
- if (reg->flags & FLAG_USE_DMA) {
- if (epf_test->dma_private) {
- dev_err(dev, "Cannot transfer data using DMA\n");
- ret = -EINVAL;
- goto err_map_addr;
+ ret = pci_epc_mem_map(epf->epc, epf->func_no, epf->vfunc_no,
+ dst_addr, copy_size, &dst_map);
+ if (ret) {
+ dev_err(dev, "Failed to map destination address\n");
+ status = STATUS_DST_ADDR_INVALID;
+ pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no,
+ &src_map);
+ goto free_buf;
}
- ret = pci_epf_test_data_transfer(epf_test, dst_phys_addr,
- src_phys_addr, reg->size, 0,
- DMA_MEM_TO_MEM);
- if (ret)
- dev_err(dev, "Data transfer failed\n");
- } else {
- void *buf;
+ map_size = min_t(size_t, dst_map.pci_size, src_map.pci_size);
- buf = kzalloc(reg->size, GFP_KERNEL);
- if (!buf) {
- ret = -ENOMEM;
- goto err_map_addr;
+ ktime_get_ts64(&start);
+ if (flags & FLAG_USE_DMA) {
+ ret = pci_epf_test_data_transfer(epf_test,
+ dst_map.phys_addr, src_map.phys_addr,
+ map_size, 0, DMA_MEM_TO_MEM);
+ if (ret) {
+ dev_err(dev, "Data transfer failed\n");
+ goto unmap;
+ }
+ } else {
+ memcpy_fromio(buf, src_map.virt_addr, map_size);
+ memcpy_toio(dst_map.virt_addr, buf, map_size);
+ buf += map_size;
}
+ ktime_get_ts64(&end);
- memcpy_fromio(buf, src_addr, reg->size);
- memcpy_toio(dst_addr, buf, reg->size);
- kfree(buf);
- }
- ktime_get_ts64(&end);
- pci_epf_test_print_rate(epf_test, "COPY", reg->size, &start, &end,
- reg->flags & FLAG_USE_DMA);
+ copy_size -= map_size;
+ src_addr += map_size;
+ dst_addr += map_size;
-err_map_addr:
- pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, dst_phys_addr);
+ pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &dst_map);
+ pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &src_map);
+ map_size = 0;
+ }
-err_dst_addr:
- pci_epc_mem_free_addr(epc, dst_phys_addr, dst_addr, reg->size);
+ pci_epf_test_print_rate(epf_test, "COPY", orig_size, &start, &end,
+ flags & FLAG_USE_DMA);
-err_src_map_addr:
- pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, src_phys_addr);
+unmap:
+ if (map_size) {
+ pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &dst_map);
+ pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &src_map);
+ }
-err_src_addr:
- pci_epc_mem_free_addr(epc, src_phys_addr, src_addr, reg->size);
+free_buf:
+ kfree(copy_buf);
-err:
+set_status:
if (!ret)
- reg->status |= STATUS_COPY_SUCCESS;
+ status |= STATUS_COPY_SUCCESS;
else
- reg->status |= STATUS_COPY_FAIL;
+ status |= STATUS_COPY_FAIL;
+ reg->status = cpu_to_le32(status);
}
static void pci_epf_test_read(struct pci_epf_test *epf_test,
struct pci_epf_test_reg *reg)
{
- int ret;
- void __iomem *src_addr;
- void *buf;
+ int ret = 0;
+ void *src_buf, *buf;
u32 crc32;
- phys_addr_t phys_addr;
+ struct pci_epc_map map;
phys_addr_t dst_phys_addr;
struct timespec64 start, end;
struct pci_epf *epf = epf_test->epf;
- struct device *dev = &epf->dev;
struct pci_epc *epc = epf->epc;
+ struct device *dev = &epf->dev;
struct device *dma_dev = epf->epc->dev.parent;
+ u64 src_addr = le64_to_cpu(reg->src_addr);
+ size_t orig_size, src_size;
+ ssize_t map_size = 0;
+ u32 flags = le32_to_cpu(reg->flags);
+ u32 checksum = le32_to_cpu(reg->checksum);
+ u32 status = 0;
- src_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size);
- if (!src_addr) {
- dev_err(dev, "Failed to allocate address\n");
- reg->status = STATUS_SRC_ADDR_INVALID;
- ret = -ENOMEM;
- goto err;
- }
-
- ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, phys_addr,
- reg->src_addr, reg->size);
- if (ret) {
- dev_err(dev, "Failed to map address\n");
- reg->status = STATUS_SRC_ADDR_INVALID;
- goto err_addr;
- }
+ orig_size = src_size = le32_to_cpu(reg->size);
- buf = kzalloc(reg->size, GFP_KERNEL);
- if (!buf) {
+ src_buf = kzalloc(src_size, GFP_KERNEL);
+ if (!src_buf) {
ret = -ENOMEM;
- goto err_map_addr;
+ goto set_status;
}
+ buf = src_buf;
- if (reg->flags & FLAG_USE_DMA) {
- dst_phys_addr = dma_map_single(dma_dev, buf, reg->size,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(dma_dev, dst_phys_addr)) {
- dev_err(dev, "Failed to map destination buffer addr\n");
- ret = -ENOMEM;
- goto err_dma_map;
+ while (src_size) {
+ ret = pci_epc_mem_map(epc, epf->func_no, epf->vfunc_no,
+ src_addr, src_size, &map);
+ if (ret) {
+ dev_err(dev, "Failed to map address\n");
+ status = STATUS_SRC_ADDR_INVALID;
+ goto free_buf;
}
- ktime_get_ts64(&start);
- ret = pci_epf_test_data_transfer(epf_test, dst_phys_addr,
- phys_addr, reg->size,
- reg->src_addr, DMA_DEV_TO_MEM);
- if (ret)
- dev_err(dev, "Data transfer failed\n");
- ktime_get_ts64(&end);
+ map_size = map.pci_size;
+ if (flags & FLAG_USE_DMA) {
+ dst_phys_addr = dma_map_single(dma_dev, buf, map_size,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(dma_dev, dst_phys_addr)) {
+ dev_err(dev,
+ "Failed to map destination buffer addr\n");
+ ret = -ENOMEM;
+ goto unmap;
+ }
+
+ ktime_get_ts64(&start);
+ ret = pci_epf_test_data_transfer(epf_test,
+ dst_phys_addr, map.phys_addr,
+ map_size, src_addr, DMA_DEV_TO_MEM);
+ if (ret)
+ dev_err(dev, "Data transfer failed\n");
+ ktime_get_ts64(&end);
+
+ dma_unmap_single(dma_dev, dst_phys_addr, map_size,
+ DMA_FROM_DEVICE);
+
+ if (ret)
+ goto unmap;
+ } else {
+ ktime_get_ts64(&start);
+ memcpy_fromio(buf, map.virt_addr, map_size);
+ ktime_get_ts64(&end);
+ }
- dma_unmap_single(dma_dev, dst_phys_addr, reg->size,
- DMA_FROM_DEVICE);
- } else {
- ktime_get_ts64(&start);
- memcpy_fromio(buf, src_addr, reg->size);
- ktime_get_ts64(&end);
+ src_size -= map_size;
+ src_addr += map_size;
+ buf += map_size;
+
+ pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &map);
+ map_size = 0;
}
- pci_epf_test_print_rate(epf_test, "READ", reg->size, &start, &end,
- reg->flags & FLAG_USE_DMA);
+ pci_epf_test_print_rate(epf_test, "READ", orig_size, &start, &end,
+ flags & FLAG_USE_DMA);
- crc32 = crc32_le(~0, buf, reg->size);
- if (crc32 != reg->checksum)
+ crc32 = crc32_le(~0, src_buf, orig_size);
+ if (crc32 != checksum)
ret = -EIO;
-err_dma_map:
- kfree(buf);
+unmap:
+ if (map_size)
+ pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &map);
-err_map_addr:
- pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, phys_addr);
+free_buf:
+ kfree(src_buf);
-err_addr:
- pci_epc_mem_free_addr(epc, phys_addr, src_addr, reg->size);
-
-err:
+set_status:
if (!ret)
- reg->status |= STATUS_READ_SUCCESS;
+ status |= STATUS_READ_SUCCESS;
else
- reg->status |= STATUS_READ_FAIL;
+ status |= STATUS_READ_FAIL;
+ reg->status = cpu_to_le32(status);
}
static void pci_epf_test_write(struct pci_epf_test *epf_test,
struct pci_epf_test_reg *reg)
{
- int ret;
- void __iomem *dst_addr;
- void *buf;
- phys_addr_t phys_addr;
+ int ret = 0;
+ void *dst_buf, *buf;
+ struct pci_epc_map map;
phys_addr_t src_phys_addr;
struct timespec64 start, end;
struct pci_epf *epf = epf_test->epf;
- struct device *dev = &epf->dev;
struct pci_epc *epc = epf->epc;
+ struct device *dev = &epf->dev;
struct device *dma_dev = epf->epc->dev.parent;
+ u64 dst_addr = le64_to_cpu(reg->dst_addr);
+ size_t orig_size, dst_size;
+ ssize_t map_size = 0;
+ u32 flags = le32_to_cpu(reg->flags);
+ u32 status = 0;
- dst_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size);
- if (!dst_addr) {
- dev_err(dev, "Failed to allocate address\n");
- reg->status = STATUS_DST_ADDR_INVALID;
- ret = -ENOMEM;
- goto err;
- }
-
- ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, phys_addr,
- reg->dst_addr, reg->size);
- if (ret) {
- dev_err(dev, "Failed to map address\n");
- reg->status = STATUS_DST_ADDR_INVALID;
- goto err_addr;
- }
+ orig_size = dst_size = le32_to_cpu(reg->size);
- buf = kzalloc(reg->size, GFP_KERNEL);
- if (!buf) {
+ dst_buf = kzalloc(dst_size, GFP_KERNEL);
+ if (!dst_buf) {
ret = -ENOMEM;
- goto err_map_addr;
+ goto set_status;
}
+ get_random_bytes(dst_buf, dst_size);
+ reg->checksum = cpu_to_le32(crc32_le(~0, dst_buf, dst_size));
+ buf = dst_buf;
- get_random_bytes(buf, reg->size);
- reg->checksum = crc32_le(~0, buf, reg->size);
-
- if (reg->flags & FLAG_USE_DMA) {
- src_phys_addr = dma_map_single(dma_dev, buf, reg->size,
- DMA_TO_DEVICE);
- if (dma_mapping_error(dma_dev, src_phys_addr)) {
- dev_err(dev, "Failed to map source buffer addr\n");
- ret = -ENOMEM;
- goto err_dma_map;
+ while (dst_size) {
+ ret = pci_epc_mem_map(epc, epf->func_no, epf->vfunc_no,
+ dst_addr, dst_size, &map);
+ if (ret) {
+ dev_err(dev, "Failed to map address\n");
+ status = STATUS_DST_ADDR_INVALID;
+ goto free_buf;
}
- ktime_get_ts64(&start);
+ map_size = map.pci_size;
+ if (flags & FLAG_USE_DMA) {
+ src_phys_addr = dma_map_single(dma_dev, buf, map_size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dma_dev, src_phys_addr)) {
+ dev_err(dev,
+ "Failed to map source buffer addr\n");
+ ret = -ENOMEM;
+ goto unmap;
+ }
+
+ ktime_get_ts64(&start);
+
+ ret = pci_epf_test_data_transfer(epf_test,
+ map.phys_addr, src_phys_addr,
+ map_size, dst_addr,
+ DMA_MEM_TO_DEV);
+ if (ret)
+ dev_err(dev, "Data transfer failed\n");
+ ktime_get_ts64(&end);
+
+ dma_unmap_single(dma_dev, src_phys_addr, map_size,
+ DMA_TO_DEVICE);
+
+ if (ret)
+ goto unmap;
+ } else {
+ ktime_get_ts64(&start);
+ memcpy_toio(map.virt_addr, buf, map_size);
+ ktime_get_ts64(&end);
+ }
- ret = pci_epf_test_data_transfer(epf_test, phys_addr,
- src_phys_addr, reg->size,
- reg->dst_addr,
- DMA_MEM_TO_DEV);
- if (ret)
- dev_err(dev, "Data transfer failed\n");
- ktime_get_ts64(&end);
+ dst_size -= map_size;
+ dst_addr += map_size;
+ buf += map_size;
- dma_unmap_single(dma_dev, src_phys_addr, reg->size,
- DMA_TO_DEVICE);
- } else {
- ktime_get_ts64(&start);
- memcpy_toio(dst_addr, buf, reg->size);
- ktime_get_ts64(&end);
+ pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &map);
+ map_size = 0;
}
- pci_epf_test_print_rate(epf_test, "WRITE", reg->size, &start, &end,
- reg->flags & FLAG_USE_DMA);
+ pci_epf_test_print_rate(epf_test, "WRITE", orig_size, &start, &end,
+ flags & FLAG_USE_DMA);
/*
* wait 1ms inorder for the write to complete. Without this delay L3
@@ -568,20 +603,19 @@ static void pci_epf_test_write(struct pci_epf_test *epf_test,
*/
usleep_range(1000, 2000);
-err_dma_map:
- kfree(buf);
+unmap:
+ if (map_size)
+ pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &map);
-err_map_addr:
- pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, phys_addr);
+free_buf:
+ kfree(dst_buf);
-err_addr:
- pci_epc_mem_free_addr(epc, phys_addr, dst_addr, reg->size);
-
-err:
+set_status:
if (!ret)
- reg->status |= STATUS_WRITE_SUCCESS;
+ status |= STATUS_WRITE_SUCCESS;
else
- reg->status |= STATUS_WRITE_FAIL;
+ status |= STATUS_WRITE_FAIL;
+ reg->status = cpu_to_le32(status);
}
static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test,
@@ -590,39 +624,42 @@ static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test,
struct pci_epf *epf = epf_test->epf;
struct device *dev = &epf->dev;
struct pci_epc *epc = epf->epc;
- u32 status = reg->status | STATUS_IRQ_RAISED;
+ u32 status = le32_to_cpu(reg->status);
+ u32 irq_number = le32_to_cpu(reg->irq_number);
+ u32 irq_type = le32_to_cpu(reg->irq_type);
int count;
/*
* Set the status before raising the IRQ to ensure that the host sees
* the updated value when it gets the IRQ.
*/
- WRITE_ONCE(reg->status, status);
+ status |= STATUS_IRQ_RAISED;
+ WRITE_ONCE(reg->status, cpu_to_le32(status));
- switch (reg->irq_type) {
+ switch (irq_type) {
case IRQ_TYPE_INTX:
pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
PCI_IRQ_INTX, 0);
break;
case IRQ_TYPE_MSI:
count = pci_epc_get_msi(epc, epf->func_no, epf->vfunc_no);
- if (reg->irq_number > count || count <= 0) {
+ if (irq_number > count || count <= 0) {
dev_err(dev, "Invalid MSI IRQ number %d / %d\n",
- reg->irq_number, count);
+ irq_number, count);
return;
}
pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
- PCI_IRQ_MSI, reg->irq_number);
+ PCI_IRQ_MSI, irq_number);
break;
case IRQ_TYPE_MSIX:
count = pci_epc_get_msix(epc, epf->func_no, epf->vfunc_no);
- if (reg->irq_number > count || count <= 0) {
- dev_err(dev, "Invalid MSIX IRQ number %d / %d\n",
- reg->irq_number, count);
+ if (irq_number > count || count <= 0) {
+ dev_err(dev, "Invalid MSI-X IRQ number %d / %d\n",
+ irq_number, count);
return;
}
pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
- PCI_IRQ_MSIX, reg->irq_number);
+ PCI_IRQ_MSIX, irq_number);
break;
default:
dev_err(dev, "Failed to raise IRQ, unknown type\n");
@@ -639,21 +676,22 @@ static void pci_epf_test_cmd_handler(struct work_struct *work)
struct device *dev = &epf->dev;
enum pci_barno test_reg_bar = epf_test->test_reg_bar;
struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
+ u32 irq_type = le32_to_cpu(reg->irq_type);
- command = READ_ONCE(reg->command);
+ command = le32_to_cpu(READ_ONCE(reg->command));
if (!command)
goto reset_handler;
WRITE_ONCE(reg->command, 0);
WRITE_ONCE(reg->status, 0);
- if ((READ_ONCE(reg->flags) & FLAG_USE_DMA) &&
+ if ((le32_to_cpu(READ_ONCE(reg->flags)) & FLAG_USE_DMA) &&
!epf_test->dma_supported) {
dev_err(dev, "Cannot transfer data using DMA\n");
goto reset_handler;
}
- if (reg->irq_type > IRQ_TYPE_MSIX) {
+ if (irq_type > IRQ_TYPE_MSIX) {
dev_err(dev, "Failed to detect IRQ type\n");
goto reset_handler;
}
@@ -686,57 +724,24 @@ reset_handler:
msecs_to_jiffies(1));
}
-static void pci_epf_test_unbind(struct pci_epf *epf)
-{
- struct pci_epf_test *epf_test = epf_get_drvdata(epf);
- struct pci_epc *epc = epf->epc;
- struct pci_epf_bar *epf_bar;
- int bar;
-
- cancel_delayed_work(&epf_test->cmd_handler);
- pci_epf_test_clean_dma_chan(epf_test);
- for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
- epf_bar = &epf->bar[bar];
-
- if (epf_test->reg[bar]) {
- pci_epc_clear_bar(epc, epf->func_no, epf->vfunc_no,
- epf_bar);
- pci_epf_free_space(epf, epf_test->reg[bar], bar,
- PRIMARY_INTERFACE);
- }
- }
-}
-
static int pci_epf_test_set_bar(struct pci_epf *epf)
{
- int bar, add;
- int ret;
- struct pci_epf_bar *epf_bar;
+ int bar, ret;
struct pci_epc *epc = epf->epc;
struct device *dev = &epf->dev;
struct pci_epf_test *epf_test = epf_get_drvdata(epf);
enum pci_barno test_reg_bar = epf_test->test_reg_bar;
- const struct pci_epc_features *epc_features;
- epc_features = epf_test->epc_features;
-
- for (bar = 0; bar < PCI_STD_NUM_BARS; bar += add) {
- epf_bar = &epf->bar[bar];
- /*
- * pci_epc_set_bar() sets PCI_BASE_ADDRESS_MEM_TYPE_64
- * if the specific implementation required a 64-bit BAR,
- * even if we only requested a 32-bit BAR.
- */
- add = (epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ? 2 : 1;
-
- if (epc_features->bar[bar].type == BAR_RESERVED)
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
+ if (!epf_test->reg[bar])
continue;
ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no,
- epf_bar);
+ &epf->bar[bar]);
if (ret) {
pci_epf_free_space(epf, epf_test->reg[bar], bar,
PRIMARY_INTERFACE);
+ epf_test->reg[bar] = NULL;
dev_err(dev, "Failed to set BAR%d\n", bar);
if (bar == test_reg_bar)
return ret;
@@ -746,22 +751,59 @@ static int pci_epf_test_set_bar(struct pci_epf *epf)
return 0;
}
-static int pci_epf_test_core_init(struct pci_epf *epf)
+static void pci_epf_test_clear_bar(struct pci_epf *epf)
+{
+ struct pci_epf_test *epf_test = epf_get_drvdata(epf);
+ struct pci_epc *epc = epf->epc;
+ int bar;
+
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
+ if (!epf_test->reg[bar])
+ continue;
+
+ pci_epc_clear_bar(epc, epf->func_no, epf->vfunc_no,
+ &epf->bar[bar]);
+ }
+}
+
+static void pci_epf_test_set_capabilities(struct pci_epf *epf)
+{
+ struct pci_epf_test *epf_test = epf_get_drvdata(epf);
+ enum pci_barno test_reg_bar = epf_test->test_reg_bar;
+ struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
+ struct pci_epc *epc = epf->epc;
+ u32 caps = 0;
+
+ if (epc->ops->align_addr)
+ caps |= CAP_UNALIGNED_ACCESS;
+
+ if (epf_test->epc_features->msi_capable)
+ caps |= CAP_MSI;
+
+ if (epf_test->epc_features->msix_capable)
+ caps |= CAP_MSIX;
+
+ if (epf_test->epc_features->intx_capable)
+ caps |= CAP_INTX;
+
+ reg->caps = cpu_to_le32(caps);
+}
+
+static int pci_epf_test_epc_init(struct pci_epf *epf)
{
struct pci_epf_test *epf_test = epf_get_drvdata(epf);
struct pci_epf_header *header = epf->header;
- const struct pci_epc_features *epc_features;
+ const struct pci_epc_features *epc_features = epf_test->epc_features;
struct pci_epc *epc = epf->epc;
struct device *dev = &epf->dev;
- bool msix_capable = false;
- bool msi_capable = true;
+ bool linkup_notifier = false;
int ret;
- epc_features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no);
- if (epc_features) {
- msix_capable = epc_features->msix_capable;
- msi_capable = epc_features->msi_capable;
- }
+ epf_test->dma_supported = true;
+
+ ret = pci_epf_test_init_dma_chan(epf_test);
+ if (ret)
+ epf_test->dma_supported = false;
if (epf->vfunc_no <= 1) {
ret = pci_epc_write_header(epc, epf->func_no, epf->vfunc_no, header);
@@ -771,11 +813,13 @@ static int pci_epf_test_core_init(struct pci_epf *epf)
}
}
+ pci_epf_test_set_capabilities(epf);
+
ret = pci_epf_test_set_bar(epf);
if (ret)
return ret;
- if (msi_capable) {
+ if (epc_features->msi_capable) {
ret = pci_epc_set_msi(epc, epf->func_no, epf->vfunc_no,
epf->msi_interrupts);
if (ret) {
@@ -784,7 +828,7 @@ static int pci_epf_test_core_init(struct pci_epf *epf)
}
}
- if (msix_capable) {
+ if (epc_features->msix_capable) {
ret = pci_epc_set_msix(epc, epf->func_no, epf->vfunc_no,
epf->msix_interrupts,
epf_test->test_reg_bar,
@@ -795,9 +839,22 @@ static int pci_epf_test_core_init(struct pci_epf *epf)
}
}
+ linkup_notifier = epc_features->linkup_notifier;
+ if (!linkup_notifier)
+ queue_work(kpcitest_workqueue, &epf_test->cmd_handler.work);
+
return 0;
}
+static void pci_epf_test_epc_deinit(struct pci_epf *epf)
+{
+ struct pci_epf_test *epf_test = epf_get_drvdata(epf);
+
+ cancel_delayed_work_sync(&epf_test->cmd_handler);
+ pci_epf_test_clean_dma_chan(epf_test);
+ pci_epf_test_clear_bar(epf);
+}
+
static int pci_epf_test_link_up(struct pci_epf *epf)
{
struct pci_epf_test *epf_test = epf_get_drvdata(epf);
@@ -808,32 +865,38 @@ static int pci_epf_test_link_up(struct pci_epf *epf)
return 0;
}
+static int pci_epf_test_link_down(struct pci_epf *epf)
+{
+ struct pci_epf_test *epf_test = epf_get_drvdata(epf);
+
+ cancel_delayed_work_sync(&epf_test->cmd_handler);
+
+ return 0;
+}
+
static const struct pci_epc_event_ops pci_epf_test_event_ops = {
- .core_init = pci_epf_test_core_init,
+ .epc_init = pci_epf_test_epc_init,
+ .epc_deinit = pci_epf_test_epc_deinit,
.link_up = pci_epf_test_link_up,
+ .link_down = pci_epf_test_link_down,
};
static int pci_epf_test_alloc_space(struct pci_epf *epf)
{
struct pci_epf_test *epf_test = epf_get_drvdata(epf);
struct device *dev = &epf->dev;
- struct pci_epf_bar *epf_bar;
size_t msix_table_size = 0;
size_t test_reg_bar_size;
size_t pba_size = 0;
- bool msix_capable;
void *base;
- int bar, add;
enum pci_barno test_reg_bar = epf_test->test_reg_bar;
- const struct pci_epc_features *epc_features;
+ enum pci_barno bar;
+ const struct pci_epc_features *epc_features = epf_test->epc_features;
size_t test_reg_size;
- epc_features = epf_test->epc_features;
-
test_reg_bar_size = ALIGN(sizeof(struct pci_epf_test_reg), 128);
- msix_capable = epc_features->msix_capable;
- if (msix_capable) {
+ if (epc_features->msix_capable) {
msix_table_size = PCI_MSIX_ENTRY_SIZE * epf->msix_interrupts;
epf_test->msix_table_offset = test_reg_bar_size;
/* Align to QWORD or 8 Bytes */
@@ -849,16 +912,14 @@ static int pci_epf_test_alloc_space(struct pci_epf *epf)
}
epf_test->reg[test_reg_bar] = base;
- for (bar = 0; bar < PCI_STD_NUM_BARS; bar += add) {
- epf_bar = &epf->bar[bar];
- add = (epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ? 2 : 1;
+ for (bar = BAR_0; bar < PCI_STD_NUM_BARS; bar++) {
+ bar = pci_epc_get_next_free_bar(epc_features, bar);
+ if (bar == NO_BAR)
+ break;
if (bar == test_reg_bar)
continue;
- if (epc_features->bar[bar].type == BAR_RESERVED)
- continue;
-
base = pci_epf_alloc_space(epf, bar_size[bar], bar,
epc_features, PRIMARY_INTERFACE);
if (!base)
@@ -870,16 +931,18 @@ static int pci_epf_test_alloc_space(struct pci_epf *epf)
return 0;
}
-static void pci_epf_configure_bar(struct pci_epf *epf,
- const struct pci_epc_features *epc_features)
+static void pci_epf_test_free_space(struct pci_epf *epf)
{
- struct pci_epf_bar *epf_bar;
- int i;
+ struct pci_epf_test *epf_test = epf_get_drvdata(epf);
+ int bar;
- for (i = 0; i < PCI_STD_NUM_BARS; i++) {
- epf_bar = &epf->bar[i];
- if (epc_features->bar[i].only_64bit)
- epf_bar->flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
+ if (!epf_test->reg[bar])
+ continue;
+
+ pci_epf_free_space(epf, epf_test->reg[bar], bar,
+ PRIMARY_INTERFACE);
+ epf_test->reg[bar] = NULL;
}
}
@@ -890,8 +953,6 @@ static int pci_epf_test_bind(struct pci_epf *epf)
const struct pci_epc_features *epc_features;
enum pci_barno test_reg_bar = BAR_0;
struct pci_epc *epc = epf->epc;
- bool linkup_notifier = false;
- bool core_init_notifier = false;
if (WARN_ON_ONCE(!epc))
return -EINVAL;
@@ -902,12 +963,9 @@ static int pci_epf_test_bind(struct pci_epf *epf)
return -EOPNOTSUPP;
}
- linkup_notifier = epc_features->linkup_notifier;
- core_init_notifier = epc_features->core_init_notifier;
test_reg_bar = pci_epc_get_first_free_bar(epc_features);
if (test_reg_bar < 0)
return -EINVAL;
- pci_epf_configure_bar(epf, epc_features);
epf_test->test_reg_bar = test_reg_bar;
epf_test->epc_features = epc_features;
@@ -916,22 +974,20 @@ static int pci_epf_test_bind(struct pci_epf *epf)
if (ret)
return ret;
- if (!core_init_notifier) {
- ret = pci_epf_test_core_init(epf);
- if (ret)
- return ret;
- }
-
- epf_test->dma_supported = true;
-
- ret = pci_epf_test_init_dma_chan(epf_test);
- if (ret)
- epf_test->dma_supported = false;
+ return 0;
+}
- if (!linkup_notifier && !core_init_notifier)
- queue_work(kpcitest_workqueue, &epf_test->cmd_handler.work);
+static void pci_epf_test_unbind(struct pci_epf *epf)
+{
+ struct pci_epf_test *epf_test = epf_get_drvdata(epf);
+ struct pci_epc *epc = epf->epc;
- return 0;
+ cancel_delayed_work_sync(&epf_test->cmd_handler);
+ if (epc->init_complete) {
+ pci_epf_test_clean_dma_chan(epf_test);
+ pci_epf_test_clear_bar(epf);
+ }
+ pci_epf_test_free_space(epf);
}
static const struct pci_epf_device_id pci_epf_test_ids[] = {
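
The central change in pci-epf-test.c is the switch from the one-shot pci_epc_mem_alloc_addr()/pci_epc_map_addr() pair to pci_epc_mem_map(), which may map less than the requested size (map.pci_size) and therefore has to be called in a loop. Below is a condensed sketch of that pattern, assuming only the API as it appears in the hunks above; the epf_write_to_host() helper name is illustrative, and error handling is trimmed (see pci_epf_test_write() above for the full version with DMA support).

#include <linux/io.h>
#include <linux/pci-epc.h>
#include <linux/pci-epf.h>

/* Sketch: push a local buffer to a host PCI address, chunk by chunk. */
static int epf_write_to_host(struct pci_epf *epf, u64 host_addr,
			     void *buf, size_t size)
{
	struct pci_epc *epc = epf->epc;
	struct pci_epc_map map;
	int ret;

	while (size) {
		ret = pci_epc_mem_map(epc, epf->func_no, epf->vfunc_no,
				      host_addr, size, &map);
		if (ret)
			return ret;

		/* map.pci_size may be smaller than the size requested. */
		memcpy_toio(map.virt_addr, buf, map.pci_size);

		host_addr += map.pci_size;
		buf += map.pci_size;
		size -= map.pci_size;

		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &map);
	}

	return 0;
}
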
diff --git a/drivers/pci/endpoint/functions/pci-epf-vntb.c b/drivers/pci/endpoint/functions/pci-epf-vntb.c
index 8e779eecd62d..e4da3fdb0007 100644
--- a/drivers/pci/endpoint/functions/pci-epf-vntb.c
+++ b/drivers/pci/endpoint/functions/pci-epf-vntb.c
@@ -408,11 +408,9 @@ static void epf_ntb_config_spad_bar_free(struct epf_ntb *ntb)
*/
static int epf_ntb_config_spad_bar_alloc(struct epf_ntb *ntb)
{
- size_t align;
enum pci_barno barno;
struct epf_ntb_ctrl *ctrl;
u32 spad_size, ctrl_size;
- u64 size;
struct pci_epf *epf = ntb->epf;
struct device *dev = &epf->dev;
u32 spad_count;
@@ -422,31 +420,13 @@ static int epf_ntb_config_spad_bar_alloc(struct epf_ntb *ntb)
epf->func_no,
epf->vfunc_no);
barno = ntb->epf_ntb_bar[BAR_CONFIG];
- size = epc_features->bar[barno].fixed_size;
- align = epc_features->align;
-
- if ((!IS_ALIGNED(size, align)))
- return -EINVAL;
-
spad_count = ntb->spad_count;
- ctrl_size = sizeof(struct epf_ntb_ctrl);
+ ctrl_size = ALIGN(sizeof(struct epf_ntb_ctrl), sizeof(u32));
spad_size = 2 * spad_count * sizeof(u32);
- if (!align) {
- ctrl_size = roundup_pow_of_two(ctrl_size);
- spad_size = roundup_pow_of_two(spad_size);
- } else {
- ctrl_size = ALIGN(ctrl_size, align);
- spad_size = ALIGN(spad_size, align);
- }
-
- if (!size)
- size = ctrl_size + spad_size;
- else if (size < ctrl_size + spad_size)
- return -EINVAL;
-
- base = pci_epf_alloc_space(epf, size, barno, epc_features, 0);
+ base = pci_epf_alloc_space(epf, ctrl_size + spad_size,
+ barno, epc_features, 0);
if (!base) {
dev_err(dev, "Config/Status/SPAD alloc region fail\n");
return -ENOMEM;
@@ -799,8 +779,9 @@ err_config_interrupt:
*/
static void epf_ntb_epc_cleanup(struct epf_ntb *ntb)
{
- epf_ntb_db_bar_clear(ntb);
epf_ntb_mw_bar_clear(ntb, ntb->num_mws);
+ epf_ntb_db_bar_clear(ntb);
+ epf_ntb_config_sspad_bar_clear(ntb);
}
#define EPF_NTB_R(_name) \
@@ -1018,8 +999,10 @@ static int vpci_scan_bus(void *sysdata)
struct epf_ntb *ndev = sysdata;
vpci_bus = pci_scan_bus(ndev->vbus_number, &vpci_ops, sysdata);
- if (vpci_bus)
- pr_err("create pci bus\n");
+ if (!vpci_bus) {
+ pr_err("create pci bus failed\n");
+ return -EINVAL;
+ }
pci_bus_add_devices(vpci_bus);
@@ -1335,13 +1318,19 @@ static int epf_ntb_bind(struct pci_epf *epf)
ret = pci_register_driver(&vntb_pci_driver);
if (ret) {
dev_err(dev, "failure register vntb pci driver\n");
- goto err_bar_alloc;
+ goto err_epc_cleanup;
}
- vpci_scan_bus(ntb);
+ ret = vpci_scan_bus(ntb);
+ if (ret)
+ goto err_unregister;
return 0;
+err_unregister:
+ pci_unregister_driver(&vntb_pci_driver);
+err_epc_cleanup:
+ epf_ntb_epc_cleanup(ntb);
err_bar_alloc:
epf_ntb_config_spad_bar_free(ntb);