From 4e8c11b7fd29f70eb7af43bae908297689f2c3da Mon Sep 17 00:00:00 2001
From: Serge Semin
Date: Wed, 14 Dec 2016 02:49:15 +0300
Subject: NTB: Alter link-state API to support multi-port devices

Multi-port devices permit NTB connections between multiple domains, so a local device can have the NTB link up with one peer while it is down with another. The NTB link-state API is altered accordingly to return a bitfield of the link states between the local device and the possible peers.

Signed-off-by: Serge Semin
Acked-by: Allen Hubbe
Signed-off-by: Jon Mason
---
 drivers/ntb/hw/amd/ntb_hw_amd.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'drivers/ntb/hw/amd/ntb_hw_amd.c')

diff --git a/drivers/ntb/hw/amd/ntb_hw_amd.c b/drivers/ntb/hw/amd/ntb_hw_amd.c index 019a158e1128..e71ab4c1fe0e 100644 --- a/drivers/ntb/hw/amd/ntb_hw_amd.c +++ b/drivers/ntb/hw/amd/ntb_hw_amd.c
@@ -212,7 +212,7 @@ static int amd_link_is_up(struct amd_ntb_dev *ndev) return 0; } -static int amd_ntb_link_is_up(struct ntb_dev *ntb, +static u64 amd_ntb_link_is_up(struct ntb_dev *ntb, enum ntb_speed *speed, enum ntb_width *width) {
-- cgit

From 443b9a14ecbe811071467d54d6f2f1182835cc4d Mon Sep 17 00:00:00 2001
From: Serge Semin
Date: Wed, 11 Jan 2017 03:11:33 +0300
Subject: NTB: Alter MW API to support multi-port devices

Multi-port NTB devices permit sharing memory among all accessible peers. The Memory Window API is altered accordingly to initialize and map memory windows for such devices:

ntb_mw_count(pidx); - number of inbound memory windows which can be allocated for a shared buffer with the specified peer device.
ntb_mw_get_align(pidx, widx); - get the alignment and size restriction parameters needed to properly allocate an inbound memory region.
ntb_peer_mw_count(); - get the number of outbound memory windows.
ntb_peer_mw_get_addr(widx); - get the mapping address of an outbound memory window.

If hardware supports inbound translation configured on the local ntb port:
ntb_mw_set_trans(pidx, widx); - set the translation address of an allocated inbound memory window so a peer device can access it.
ntb_mw_clear_trans(pidx, widx); - clear the translation address of an inbound memory window.

If hardware supports outbound translation configured on the peer ntb port:
ntb_peer_mw_set_trans(pidx, widx); - set the translation address of a memory window retrieved from a peer device.
ntb_peer_mw_clear_trans(pidx, widx); - clear the translation address of an outbound memory window.

Signed-off-by: Serge Semin
Acked-by: Allen Hubbe
Signed-off-by: Jon Mason
---
 drivers/ntb/hw/amd/ntb_hw_amd.c | 68 ++++++++++++++++++++++++++++++-----------
 1 file changed, 51 insertions(+), 17 deletions(-)

(limited to 'drivers/ntb/hw/amd/ntb_hw_amd.c')

diff --git a/drivers/ntb/hw/amd/ntb_hw_amd.c b/drivers/ntb/hw/amd/ntb_hw_amd.c index e71ab4c1fe0e..bcefe1df9ce3 100644 --- a/drivers/ntb/hw/amd/ntb_hw_amd.c +++ b/drivers/ntb/hw/amd/ntb_hw_amd.c
@@ -5,6 +5,7 @@ * GPL LICENSE SUMMARY * * Copyright (C) 2016 Advanced Micro Devices, Inc. All Rights Reserved. + * Copyright (C) 2016 T-Platforms. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -13,6 +14,7 @@ * BSD LICENSE * * Copyright (C) 2016 Advanced Micro Devices, Inc. All Rights Reserved. + * Copyright (C) 2016 T-Platforms. All Rights Reserved.
 * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions
@@ -79,40 +81,42 @@ static int ndev_mw_to_bar(struct amd_ntb_dev *ndev, int idx) return 1 << idx; } -static int amd_ntb_mw_count(struct ntb_dev *ntb) +static int amd_ntb_mw_count(struct ntb_dev *ntb, int pidx) { + if (pidx != NTB_DEF_PEER_IDX) + return -EINVAL; + return ntb_ndev(ntb)->mw_count; } -static int amd_ntb_mw_get_range(struct ntb_dev *ntb, int idx, - phys_addr_t *base, - resource_size_t *size, - resource_size_t *align, - resource_size_t *align_size) +static int amd_ntb_mw_get_align(struct ntb_dev *ntb, int pidx, int idx, + resource_size_t *addr_align, + resource_size_t *size_align, + resource_size_t *size_max) { struct amd_ntb_dev *ndev = ntb_ndev(ntb); int bar; + if (pidx != NTB_DEF_PEER_IDX) + return -EINVAL; + bar = ndev_mw_to_bar(ndev, idx); if (bar < 0) return bar; - if (base) - *base = pci_resource_start(ndev->ntb.pdev, bar); - - if (size) - *size = pci_resource_len(ndev->ntb.pdev, bar); + if (addr_align) + *addr_align = SZ_4K; - if (align) - *align = SZ_4K; + if (size_align) + *size_align = 1; - if (align_size) - *align_size = 1; + if (size_max) + *size_max = pci_resource_len(ndev->ntb.pdev, bar); return 0; } -static int amd_ntb_mw_set_trans(struct ntb_dev *ntb, int idx, +static int amd_ntb_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx, dma_addr_t addr, resource_size_t size) { struct amd_ntb_dev *ndev = ntb_ndev(ntb);
@@ -122,6 +126,9 @@ static int amd_ntb_mw_set_trans(struct ntb_dev *ntb, int idx, u64 base_addr, limit, reg_val; int bar; + if (pidx != NTB_DEF_PEER_IDX) + return -EINVAL; + bar = ndev_mw_to_bar(ndev, idx); if (bar < 0) return bar;
@@ -284,6 +291,31 @@ static int amd_ntb_link_disable(struct ntb_dev *ntb) return 0; } +static int amd_ntb_peer_mw_count(struct ntb_dev *ntb) +{ + /* The same as for inbound MWs */ + return ntb_ndev(ntb)->mw_count; +} + +static int amd_ntb_peer_mw_get_addr(struct ntb_dev *ntb, int idx, + phys_addr_t *base, resource_size_t *size) +{ + struct amd_ntb_dev *ndev = ntb_ndev(ntb); + int bar; + + bar = ndev_mw_to_bar(ndev, idx); + if (bar < 0) + return bar; + + if (base) + *base = pci_resource_start(ndev->ntb.pdev, bar); + + if (size) + *size = pci_resource_len(ndev->ntb.pdev, bar); + + return 0; +} + static u64 amd_ntb_db_valid_mask(struct ntb_dev *ntb) { return ntb_ndev(ntb)->db_valid_mask;
@@ -431,8 +463,10 @@ static int amd_ntb_peer_spad_write(struct ntb_dev *ntb, static const struct ntb_dev_ops amd_ntb_ops = { .mw_count = amd_ntb_mw_count, - .mw_get_range = amd_ntb_mw_get_range, + .mw_get_align = amd_ntb_mw_get_align, .mw_set_trans = amd_ntb_mw_set_trans, + .peer_mw_count = amd_ntb_peer_mw_count, + .peer_mw_get_addr = amd_ntb_peer_mw_get_addr, .link_is_up = amd_ntb_link_is_up, .link_enable = amd_ntb_link_enable, .link_disable = amd_ntb_link_disable,
-- cgit

From d67288a39584daad11edee9b03d53264ba147453 Mon Sep 17 00:00:00 2001
From: Serge Semin
Date: Wed, 11 Jan 2017 03:13:20 +0300
Subject: NTB: Alter Scratchpads API to support multi-port devices

Even though there is no real NTB hardware that has both more than two ports and Scratchpad registers, it is logically correct to have the Scratchpad API accept a peer port index as well. The Intel/AMD drivers utilize the Primary and Secondary topology to split the Scratchpads between the connected root devices.
Since the port-index API has been introduced, the Intel/AMD NTB hardware drivers can use the device port to determine which Scratchpad registers actually belong to the local and peer devices. The same approach can be used if some future hardware is multi-port and has a set of Scratchpads.

Here is a brief summary of the changes in the API:

ntb_spad_count() - return the number of Scratchpads per port
ntb_peer_spad_addr(pidx, sidx) - get the address of a Scratchpad register of the peer device with index pidx
ntb_peer_spad_read(pidx, sidx) - read the specified Scratchpad register of the peer with index pidx
ntb_peer_spad_write(pidx, sidx) - write data to the specified Scratchpad register of the peer with index pidx

Since there is hardware which doesn't support Scratchpad registers, the corresponding API methods are now made optional.

Signed-off-by: Serge Semin
Acked-by: Allen Hubbe
Signed-off-by: Jon Mason
---
 drivers/ntb/hw/amd/ntb_hw_amd.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

(limited to 'drivers/ntb/hw/amd/ntb_hw_amd.c')

diff --git a/drivers/ntb/hw/amd/ntb_hw_amd.c b/drivers/ntb/hw/amd/ntb_hw_amd.c index bcefe1df9ce3..891beb9f26c2 100644 --- a/drivers/ntb/hw/amd/ntb_hw_amd.c +++ b/drivers/ntb/hw/amd/ntb_hw_amd.c
@@ -432,30 +432,30 @@ static int amd_ntb_spad_write(struct ntb_dev *ntb, return 0; } -static u32 amd_ntb_peer_spad_read(struct ntb_dev *ntb, int idx) +static u32 amd_ntb_peer_spad_read(struct ntb_dev *ntb, int pidx, int sidx) { struct amd_ntb_dev *ndev = ntb_ndev(ntb); void __iomem *mmio = ndev->self_mmio; u32 offset; - if (idx < 0 || idx >= ndev->spad_count) + if (sidx < 0 || sidx >= ndev->spad_count) return -EINVAL; - offset = ndev->peer_spad + (idx << 2); + offset = ndev->peer_spad + (sidx << 2); return readl(mmio + AMD_SPAD_OFFSET + offset); } -static int amd_ntb_peer_spad_write(struct ntb_dev *ntb, - int idx, u32 val) +static int amd_ntb_peer_spad_write(struct ntb_dev *ntb, int pidx, + int sidx, u32 val) { struct amd_ntb_dev *ndev = ntb_ndev(ntb); void __iomem *mmio = ndev->self_mmio; u32 offset; - if (idx < 0 || idx >= ndev->spad_count) + if (sidx < 0 || sidx >= ndev->spad_count) return -EINVAL; - offset = ndev->peer_spad + (idx << 2); + offset = ndev->peer_spad + (sidx << 2); writel(val, mmio + AMD_SPAD_OFFSET + offset); return 0;
-- cgit

From 0f9bfb979a5fae2936afa128c04f29ab5e07a9ad Mon Sep 17 00:00:00 2001
From: Logan Gunthorpe
Date: Tue, 10 Jan 2017 17:33:36 -0700
Subject: ntb_hw_amd: Style fixes: open code macros that just obfuscate code

As per comments in [1] by Greg Kroah-Hartman, the ndev_* macros should be cleaned up. This makes it clearer what's actually going on when reading the code.
[1] http://www.spinics.net/lists/linux-pci/msg56904.html Signed-off-by: Logan Gunthorpe Signed-off-by: Jon Mason --- drivers/ntb/hw/amd/ntb_hw_amd.c | 55 ++++++++++++++++++++++------------------- 1 file changed, 29 insertions(+), 26 deletions(-) (limited to 'drivers/ntb/hw/amd/ntb_hw_amd.c') diff --git a/drivers/ntb/hw/amd/ntb_hw_amd.c b/drivers/ntb/hw/amd/ntb_hw_amd.c index 891beb9f26c2..f0788aae05c9 100644 --- a/drivers/ntb/hw/amd/ntb_hw_amd.c +++ b/drivers/ntb/hw/amd/ntb_hw_amd.c @@ -133,7 +133,7 @@ static int amd_ntb_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx, if (bar < 0) return bar; - mw_size = pci_resource_len(ndev->ntb.pdev, bar); + mw_size = pci_resource_len(ntb->pdev, bar); /* make sure the range fits in the usable mw size */ if (size > mw_size) @@ -142,7 +142,7 @@ static int amd_ntb_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx, mmio = ndev->self_mmio; peer_mmio = ndev->peer_mmio; - base_addr = pci_resource_start(ndev->ntb.pdev, bar); + base_addr = pci_resource_start(ntb->pdev, bar); if (bar != 1) { xlat_reg = AMD_BAR23XLAT_OFFSET + ((bar - 2) << 2); @@ -232,7 +232,7 @@ static u64 amd_ntb_link_is_up(struct ntb_dev *ntb, if (width) *width = NTB_LNK_STA_WIDTH(ndev->lnk_sta); - dev_dbg(ndev_dev(ndev), "link is up.\n"); + dev_dbg(&ntb->pdev->dev, "link is up.\n"); ret = 1; } else { @@ -241,7 +241,7 @@ static u64 amd_ntb_link_is_up(struct ntb_dev *ntb, if (width) *width = NTB_WIDTH_NONE; - dev_dbg(ndev_dev(ndev), "link is down.\n"); + dev_dbg(&ntb->pdev->dev, "link is down.\n"); } return ret; @@ -261,7 +261,7 @@ static int amd_ntb_link_enable(struct ntb_dev *ntb, if (ndev->ntb.topo == NTB_TOPO_SEC) return -EINVAL; - dev_dbg(ndev_dev(ndev), "Enabling Link.\n"); + dev_dbg(&ntb->pdev->dev, "Enabling Link.\n"); ntb_ctl = readl(mmio + AMD_CNTL_OFFSET); ntb_ctl |= (PMM_REG_CTL | SMM_REG_CTL); @@ -282,7 +282,7 @@ static int amd_ntb_link_disable(struct ntb_dev *ntb) if (ndev->ntb.topo == NTB_TOPO_SEC) return -EINVAL; - dev_dbg(ndev_dev(ndev), "Enabling Link.\n"); + dev_dbg(&ntb->pdev->dev, "Enabling Link.\n"); ntb_ctl = readl(mmio + AMD_CNTL_OFFSET); ntb_ctl &= ~(PMM_REG_CTL | SMM_REG_CTL); @@ -500,18 +500,19 @@ static void amd_ack_smu(struct amd_ntb_dev *ndev, u32 bit) static void amd_handle_event(struct amd_ntb_dev *ndev, int vec) { void __iomem *mmio = ndev->self_mmio; + struct device *dev = &ndev->ntb.pdev->dev; u32 status; status = readl(mmio + AMD_INTSTAT_OFFSET); if (!(status & AMD_EVENT_INTMASK)) return; - dev_dbg(ndev_dev(ndev), "status = 0x%x and vec = %d\n", status, vec); + dev_dbg(dev, "status = 0x%x and vec = %d\n", status, vec); status &= AMD_EVENT_INTMASK; switch (status) { case AMD_PEER_FLUSH_EVENT: - dev_info(ndev_dev(ndev), "Flush is done.\n"); + dev_info(dev, "Flush is done.\n"); break; case AMD_PEER_RESET_EVENT: amd_ack_smu(ndev, AMD_PEER_RESET_EVENT); @@ -537,7 +538,7 @@ static void amd_handle_event(struct amd_ntb_dev *ndev, int vec) status = readl(mmio + AMD_PMESTAT_OFFSET); /* check if this is WAKEUP event */ if (status & 0x1) - dev_info(ndev_dev(ndev), "Wakeup is done.\n"); + dev_info(dev, "Wakeup is done.\n"); amd_ack_smu(ndev, AMD_PEER_D0_EVENT); @@ -546,14 +547,14 @@ static void amd_handle_event(struct amd_ntb_dev *ndev, int vec) AMD_LINK_HB_TIMEOUT); break; default: - dev_info(ndev_dev(ndev), "event status = 0x%x.\n", status); + dev_info(dev, "event status = 0x%x.\n", status); break; } } static irqreturn_t ndev_interrupt(struct amd_ntb_dev *ndev, int vec) { - dev_dbg(ndev_dev(ndev), "vec %d\n", vec); + dev_dbg(&ndev->ntb.pdev->dev, "vec %d\n", vec); 
if (vec > (AMD_DB_CNT - 1) || (ndev->msix_vec_count == 1)) amd_handle_event(ndev, vec); @@ -575,7 +576,7 @@ static irqreturn_t ndev_irq_isr(int irq, void *dev) { struct amd_ntb_dev *ndev = dev; - return ndev_interrupt(ndev, irq - ndev_pdev(ndev)->irq); + return ndev_interrupt(ndev, irq - ndev->ntb.pdev->irq); } static int ndev_init_isr(struct amd_ntb_dev *ndev, @@ -584,7 +585,7 @@ static int ndev_init_isr(struct amd_ntb_dev *ndev, struct pci_dev *pdev; int rc, i, msix_count, node; - pdev = ndev_pdev(ndev); + pdev = ndev->ntb.pdev; node = dev_to_node(&pdev->dev); @@ -626,7 +627,7 @@ static int ndev_init_isr(struct amd_ntb_dev *ndev, goto err_msix_request; } - dev_dbg(ndev_dev(ndev), "Using msix interrupts\n"); + dev_dbg(&pdev->dev, "Using msix interrupts\n"); ndev->db_count = msix_min; ndev->msix_vec_count = msix_max; return 0; @@ -653,7 +654,7 @@ err_msix_vec_alloc: if (rc) goto err_msi_request; - dev_dbg(ndev_dev(ndev), "Using msi interrupts\n"); + dev_dbg(&pdev->dev, "Using msi interrupts\n"); ndev->db_count = 1; ndev->msix_vec_count = 1; return 0; @@ -670,7 +671,7 @@ err_msi_enable: if (rc) goto err_intx_request; - dev_dbg(ndev_dev(ndev), "Using intx interrupts\n"); + dev_dbg(&pdev->dev, "Using intx interrupts\n"); ndev->db_count = 1; ndev->msix_vec_count = 1; return 0; @@ -685,7 +686,7 @@ static void ndev_deinit_isr(struct amd_ntb_dev *ndev) void __iomem *mmio = ndev->self_mmio; int i; - pdev = ndev_pdev(ndev); + pdev = ndev->ntb.pdev; /* Mask all doorbell interrupts */ ndev->db_mask = ndev->db_valid_mask; @@ -811,7 +812,8 @@ static void ndev_init_debugfs(struct amd_ntb_dev *ndev) ndev->debugfs_info = NULL; } else { ndev->debugfs_dir = - debugfs_create_dir(ndev_name(ndev), debugfs_dir); + debugfs_create_dir(pci_name(ndev->ntb.pdev), + debugfs_dir); if (!ndev->debugfs_dir) ndev->debugfs_info = NULL; else @@ -846,7 +848,7 @@ static int amd_poll_link(struct amd_ntb_dev *ndev) reg = readl(mmio + AMD_SIDEINFO_OFFSET); reg &= NTB_LIN_STA_ACTIVE_BIT; - dev_dbg(ndev_dev(ndev), "%s: reg_val = 0x%x.\n", __func__, reg); + dev_dbg(&ndev->ntb.pdev->dev, "%s: reg_val = 0x%x.\n", __func__, reg); if (reg == ndev->cntl_sta) return 0; @@ -928,7 +930,8 @@ static int amd_init_ntb(struct amd_ntb_dev *ndev) break; default: - dev_err(ndev_dev(ndev), "AMD NTB does not support B2B mode.\n"); + dev_err(&ndev->ntb.pdev->dev, + "AMD NTB does not support B2B mode.\n"); return -EINVAL; } @@ -957,10 +960,10 @@ static int amd_init_dev(struct amd_ntb_dev *ndev) struct pci_dev *pdev; int rc = 0; - pdev = ndev_pdev(ndev); + pdev = ndev->ntb.pdev; ndev->ntb.topo = amd_get_topo(ndev); - dev_dbg(ndev_dev(ndev), "AMD NTB topo is %s\n", + dev_dbg(&pdev->dev, "AMD NTB topo is %s\n", ntb_topo_string(ndev->ntb.topo)); rc = amd_init_ntb(ndev); @@ -969,7 +972,7 @@ static int amd_init_dev(struct amd_ntb_dev *ndev) rc = amd_init_isr(ndev); if (rc) { - dev_err(ndev_dev(ndev), "fail to init isr.\n"); + dev_err(&pdev->dev, "fail to init isr.\n"); return rc; } @@ -1007,7 +1010,7 @@ static int amd_ntb_init_pci(struct amd_ntb_dev *ndev, rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (rc) goto err_dma_mask; - dev_warn(ndev_dev(ndev), "Cannot DMA highmem\n"); + dev_warn(&pdev->dev, "Cannot DMA highmem\n"); } rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); @@ -1015,7 +1018,7 @@ static int amd_ntb_init_pci(struct amd_ntb_dev *ndev, rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); if (rc) goto err_dma_mask; - dev_warn(ndev_dev(ndev), "Cannot DMA consistent highmem\n"); + dev_warn(&pdev->dev, "Cannot DMA consistent 
highmem\n"); } ndev->self_mmio = pci_iomap(pdev, 0, 0); @@ -1038,7 +1041,7 @@ err_pci_enable: static void amd_ntb_deinit_pci(struct amd_ntb_dev *ndev) { - struct pci_dev *pdev = ndev_pdev(ndev); + struct pci_dev *pdev = ndev->ntb.pdev; pci_iounmap(pdev, ndev->self_mmio); -- cgit