Diffstat (limited to 'drivers/dma/ioat/init.c')
| -rw-r--r-- | drivers/dma/ioat/init.c | 194 |
1 file changed, 111 insertions, 83 deletions
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index 2d810dfcdc48..227398673b73 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -1,19 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Intel I/OAT DMA Linux driver
  * Copyright(c) 2004 - 2015 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
  */
 
 #include <linux/init.h>
@@ -27,7 +15,6 @@
 #include <linux/workqueue.h>
 #include <linux/prefetch.h>
 #include <linux/dca.h>
-#include <linux/aer.h>
 #include <linux/sizes.h>
 #include "dma.h"
 #include "registers.h"
@@ -36,6 +23,7 @@
 #include "../dmaengine.h"
 
 MODULE_VERSION(IOAT_DMA_VERSION);
+MODULE_DESCRIPTION("Intel I/OAT DMA Linux driver");
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_AUTHOR("Intel Corporation");
 
@@ -119,6 +107,9 @@ static const struct pci_device_id ioat_pci_tbl[] = {
 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE2) },
 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE3) },
 
+	/* I/OAT v3.4 platforms */
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_ICX) },
+
 	{ 0, }
 };
 MODULE_DEVICE_TABLE(pci, ioat_pci_tbl);
@@ -135,10 +126,10 @@ static int ioat3_dma_self_test(struct ioatdma_device *ioat_dma);
 static int ioat_dca_enabled = 1;
 module_param(ioat_dca_enabled, int, 0644);
 MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)");
-int ioat_pending_level = 4;
+int ioat_pending_level = 7;
 module_param(ioat_pending_level, int, 0644);
 MODULE_PARM_DESC(ioat_pending_level,
-		 "high-water mark for pushing ioat descriptors (default: 4)");
+		 "high-water mark for pushing ioat descriptors (default: 7)");
 static char ioat_interrupt_style[32] = "msix";
 module_param_string(ioat_interrupt_style, ioat_interrupt_style,
 		    sizeof(ioat_interrupt_style), 0644);
@@ -430,7 +421,7 @@ int ioat_dma_setup_interrupts(struct ioatdma_device *ioat_dma)
 
 msix:
 	/* The number of MSI-X vectors should equal the number of channels */
-	msixcnt = ioat_dma->dma_dev.chancnt;
+	msixcnt = ioat_dma->chancnt;
 	for (i = 0; i < msixcnt; i++)
 		ioat_dma->msix_entries[i].entry = i;
 
@@ -521,7 +512,7 @@ static int ioat_probe(struct ioatdma_device *ioat_dma)
 	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
 	dma->dev = &pdev->dev;
 
-	if (!dma->chancnt) {
+	if (!ioat_dma->chancnt) {
 		dev_err(dev, "channel enumeration error\n");
 		goto err_setup_interrupts;
 	}
@@ -544,18 +535,6 @@ err_out:
 	return err;
 }
 
-static int ioat_register(struct ioatdma_device *ioat_dma)
-{
-	int err = dma_async_device_register(&ioat_dma->dma_dev);
-
-	if (err) {
-		ioat_disable_interrupts(ioat_dma);
-		dma_pool_destroy(ioat_dma->completion_pool);
-	}
-
-	return err;
-}
-
 static void ioat_dma_remove(struct ioatdma_device *ioat_dma)
 {
 	struct dma_device *dma = &ioat_dma->dma_dev;
@@ -565,10 +544,6 @@ static void ioat_dma_remove(struct ioatdma_device *ioat_dma)
 	ioat_kobject_del(ioat_dma);
 
 	dma_async_device_unregister(dma);
-
-	dma_pool_destroy(ioat_dma->completion_pool);
-
-	INIT_LIST_HEAD(&dma->channels);
 }
 
 /**
@@ -581,15 +556,16 @@ static void ioat_enumerate_channels(struct ioatdma_device *ioat_dma)
 	struct device *dev = &ioat_dma->pdev->dev;
 	struct dma_device *dma = &ioat_dma->dma_dev;
 	u8 xfercap_log;
+	int chancnt;
 	int i;
 
 	INIT_LIST_HEAD(&dma->channels);
-	dma->chancnt = readb(ioat_dma->reg_base + IOAT_CHANCNT_OFFSET);
-	dma->chancnt &= 0x1f; /* bits [4:0] valid */
-	if (dma->chancnt > ARRAY_SIZE(ioat_dma->idx)) {
+	chancnt = readb(ioat_dma->reg_base + IOAT_CHANCNT_OFFSET);
+	chancnt &= 0x1f; /* bits [4:0] valid */
+	if (chancnt > ARRAY_SIZE(ioat_dma->idx)) {
 		dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n",
-			 dma->chancnt, ARRAY_SIZE(ioat_dma->idx));
-		dma->chancnt = ARRAY_SIZE(ioat_dma->idx);
+			 chancnt, ARRAY_SIZE(ioat_dma->idx));
+		chancnt = ARRAY_SIZE(ioat_dma->idx);
 	}
 	xfercap_log = readb(ioat_dma->reg_base + IOAT_XFERCAP_OFFSET);
 	xfercap_log &= 0x1f; /* bits [4:0] valid */
@@ -597,8 +573,8 @@ static void ioat_enumerate_channels(struct ioatdma_device *ioat_dma)
 		return;
 
 	dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log);
-	for (i = 0; i < dma->chancnt; i++) {
-		ioat_chan = devm_kzalloc(dev, sizeof(*ioat_chan), GFP_KERNEL);
+	for (i = 0; i < chancnt; i++) {
+		ioat_chan = kzalloc(sizeof(*ioat_chan), GFP_KERNEL);
 		if (!ioat_chan)
 			break;
 
@@ -610,12 +586,12 @@ static void ioat_enumerate_channels(struct ioatdma_device *ioat_dma)
 			break;
 		}
 	}
-	dma->chancnt = i;
+	ioat_dma->chancnt = i;
 }
 
 /**
  * ioat_free_chan_resources - release all the descriptors
- * @chan: the channel to be cleaned
+ * @c: the channel to be cleaned
  */
 static void ioat_free_chan_resources(struct dma_chan *c)
 {
@@ -633,7 +609,16 @@ static void ioat_free_chan_resources(struct dma_chan *c)
 		return;
 
 	ioat_stop(ioat_chan);
-	ioat_reset_hw(ioat_chan);
+
+	if (!test_bit(IOAT_CHAN_DOWN, &ioat_chan->state)) {
+		ioat_reset_hw(ioat_chan);
+
+		/* Put LTR to idle */
+		if (ioat_dma->version >= IOAT_VER_3_4)
+			writeb(IOAT_CHAN_LTR_SWSEL_IDLE,
+			       ioat_chan->reg_base +
+			       IOAT_CHAN_LTR_SWSEL_OFFSET);
+	}
 
 	spin_lock_bh(&ioat_chan->cleanup_lock);
 	spin_lock_bh(&ioat_chan->prep_lock);
@@ -655,7 +640,7 @@ static void ioat_free_chan_resources(struct dma_chan *c)
 	}
 
 	for (i = 0; i < ioat_chan->desc_chunks; i++) {
-		dma_free_coherent(to_dev(ioat_chan), SZ_2M,
+		dma_free_coherent(to_dev(ioat_chan), IOAT_CHUNK_SIZE,
 				  ioat_chan->descs[i].virt,
 				  ioat_chan->descs[i].hw);
 		ioat_chan->descs[i].virt = NULL;
@@ -724,6 +709,28 @@ static int ioat_alloc_chan_resources(struct dma_chan *c)
 	spin_unlock_bh(&ioat_chan->prep_lock);
 	spin_unlock_bh(&ioat_chan->cleanup_lock);
 
+	/* Setting up LTR values for 3.4 or later */
+	if (ioat_chan->ioat_dma->version >= IOAT_VER_3_4) {
+		u32 lat_val;
+
+		lat_val = IOAT_CHAN_LTR_ACTIVE_SNVAL |
+			IOAT_CHAN_LTR_ACTIVE_SNLATSCALE |
+			IOAT_CHAN_LTR_ACTIVE_SNREQMNT;
+		writel(lat_val, ioat_chan->reg_base +
+				IOAT_CHAN_LTR_ACTIVE_OFFSET);
+
+		lat_val = IOAT_CHAN_LTR_IDLE_SNVAL |
+			  IOAT_CHAN_LTR_IDLE_SNLATSCALE |
+			  IOAT_CHAN_LTR_IDLE_SNREQMNT;
+		writel(lat_val, ioat_chan->reg_base +
+				IOAT_CHAN_LTR_IDLE_OFFSET);
+
+		/* Select to active */
+		writeb(IOAT_CHAN_LTR_SWSEL_ACTIVE,
+		       ioat_chan->reg_base +
+		       IOAT_CHAN_LTR_SWSEL_OFFSET);
+	}
+
 	ioat_start_null_desc(ioat_chan);
 
 	/* check that we got off the ground */
@@ -749,8 +756,6 @@ ioat_init_channel(struct ioatdma_device *ioat_dma,
 		  struct ioatdma_chan *ioat_chan, int idx)
 {
 	struct dma_device *dma = &ioat_dma->dma_dev;
-	struct dma_chan *c = &ioat_chan->dma_chan;
-	unsigned long data = (unsigned long) c;
 
 	ioat_chan->ioat_dma = ioat_dma;
 	ioat_chan->reg_base = ioat_dma->reg_base + (0x80 * (idx + 1));
@@ -760,7 +765,7 @@ ioat_init_channel(struct ioatdma_device *ioat_dma,
 	list_add_tail(&ioat_chan->dma_chan.device_node, &dma->channels);
 	ioat_dma->idx[idx] = ioat_chan;
 	timer_setup(&ioat_chan->timer, ioat_timer_event, 0);
-	tasklet_init(&ioat_chan->cleanup_task, ioat_cleanup_event, data);
+	tasklet_setup(&ioat_chan->cleanup_task, ioat_cleanup_event);
 }
 
 #define IOAT_NUM_SRC_TEST 6 /* must be <= 8 */
@@ -900,7 +905,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *ioat_dma)
 
 	op = IOAT_OP_XOR_VAL;
 
-	/* validate the sources with the destintation page */
+	/* validate the sources with the destination page */
 	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
 		xor_val_srcs[i] = xor_srcs[i];
 	xor_val_srcs[i] = dest;
@@ -1165,9 +1170,9 @@ static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca)
 			       ioat_chan->reg_base + IOAT_DCACTRL_OFFSET);
 	}
 
-	err = ioat_register(ioat_dma);
+	err = dma_async_device_register(&ioat_dma->dma_dev);
 	if (err)
-		return err;
+		goto err_disable_interrupts;
 
 	ioat_kobject_add(ioat_dma, &ioat_ktype);
 
@@ -1175,17 +1180,30 @@ static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca)
 		ioat_dma->dca = ioat_dca_init(pdev, ioat_dma->reg_base);
 
 	/* disable relaxed ordering */
-	err = pcie_capability_read_word(pdev, IOAT_DEVCTRL_OFFSET, &val16);
-	if (err)
-		return err;
+	err = pcie_capability_read_word(pdev, PCI_EXP_DEVCTL, &val16);
+	if (err) {
+		err = pcibios_err_to_errno(err);
+		goto err_disable_interrupts;
+	}
 
 	/* clear relaxed ordering enable */
-	val16 &= ~IOAT_DEVCTRL_ROE;
-	err = pcie_capability_write_word(pdev, IOAT_DEVCTRL_OFFSET, val16);
-	if (err)
-		return err;
+	val16 &= ~PCI_EXP_DEVCTL_RELAX_EN;
+	err = pcie_capability_write_word(pdev, PCI_EXP_DEVCTL, val16);
+	if (err) {
+		err = pcibios_err_to_errno(err);
+		goto err_disable_interrupts;
+	}
+
+	if (ioat_dma->cap & IOAT_CAP_DPS)
+		writeb(ioat_pending_level + 1,
+		       ioat_dma->reg_base + IOAT_PREFETCH_LIMIT_OFFSET);
 
 	return 0;
+
+err_disable_interrupts:
+	ioat_disable_interrupts(ioat_dma);
+	dma_pool_destroy(ioat_dma->completion_pool);
+	return err;
 }
 
 static void ioat_shutdown(struct pci_dev *pdev)
@@ -1206,12 +1224,12 @@ static void ioat_shutdown(struct pci_dev *pdev)
 		set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
 		spin_unlock_bh(&ioat_chan->prep_lock);
 		/*
-		 * Synchronization rule for del_timer_sync():
+		 * Synchronization rule for timer_delete_sync():
 		 *  - The caller must not hold locks which would prevent
 		 *    completion of the timer's handler.
 		 * So prep_lock cannot be held before calling it.
 		 */
-		del_timer_sync(&ioat_chan->timer);
+		timer_delete_sync(&ioat_chan->timer);
 
 		/* this should quiesce then reset */
 		ioat_reset_hw(ioat_chan);
@@ -1245,7 +1263,7 @@ static void ioat_resume(struct ioatdma_device *ioat_dma)
 #define DRV_NAME "ioatdma"
 
 static pci_ers_result_t ioat_pcie_error_detected(struct pci_dev *pdev,
-						 enum pci_channel_state error)
+						 pci_channel_state_t error)
 {
 	dev_dbg(&pdev->dev, "%s: PCIe AER error %d\n", DRV_NAME, error);
 
@@ -1268,7 +1286,6 @@ static pci_ers_result_t ioat_pcie_error_slot_reset(struct pci_dev *pdev)
 	} else {
 		pci_set_master(pdev);
 		pci_restore_state(pdev);
-		pci_save_state(pdev);
 		pci_wake_from_d3(pdev, false);
 	}
 
@@ -1300,16 +1317,28 @@ static struct pci_driver ioat_pci_driver = {
 	.err_handler	= &ioat_err_handler,
 };
 
+static void release_ioatdma(struct dma_device *device)
+{
+	struct ioatdma_device *d = to_ioatdma_device(device);
+	int i;
+
+	for (i = 0; i < IOAT_MAX_CHANS; i++)
+		kfree(d->idx[i]);
+
+	dma_pool_destroy(d->completion_pool);
+	kfree(d);
+}
+
 static struct ioatdma_device *
 alloc_ioatdma(struct pci_dev *pdev, void __iomem *iobase)
 {
-	struct device *dev = &pdev->dev;
-	struct ioatdma_device *d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL);
+	struct ioatdma_device *d = kzalloc(sizeof(*d), GFP_KERNEL);
 
 	if (!d)
 		return NULL;
 	d->pdev = pdev;
 	d->reg_base = iobase;
+	d->dma_dev.device_release = release_ioatdma;
 	return d;
 }
 
@@ -1318,6 +1347,8 @@ static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	void __iomem * const *iomap;
 	struct device *dev = &pdev->dev;
 	struct ioatdma_device *device;
+	unsigned int i;
+	u8 version;
 	int err;
 
 	err = pcim_enable_device(pdev);
@@ -1331,15 +1362,11 @@ static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	if (!iomap)
 		return -ENOMEM;
 
-	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
-	if (err)
-		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
-	if (err)
-		return err;
+	version = readb(iomap[IOAT_MMIO_BAR] + IOAT_VER_OFFSET);
+	if (version < IOAT_VER_3_0)
+		return -ENODEV;
 
-	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
-	if (err)
-		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
 	if (err)
 		return err;
 
@@ -1349,20 +1376,19 @@ static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	pci_set_master(pdev);
 	pci_set_drvdata(pdev, device);
 
-	device->version = readb(device->reg_base + IOAT_VER_OFFSET);
-	if (device->version >= IOAT_VER_3_0) {
-		if (is_skx_ioat(pdev))
-			device->version = IOAT_VER_3_2;
-		err = ioat3_dma_probe(device, ioat_dca_enabled);
+	device->version = version;
+	if (device->version >= IOAT_VER_3_4)
+		ioat_dca_enabled = 0;
 
-		if (device->version >= IOAT_VER_3_3)
-			pci_enable_pcie_error_reporting(pdev);
-	} else
-		return -ENODEV;
+	if (is_skx_ioat(pdev))
+		device->version = IOAT_VER_3_2;
 
+	err = ioat3_dma_probe(device, ioat_dca_enabled);
 	if (err) {
+		for (i = 0; i < IOAT_MAX_CHANS; i++)
+			kfree(device->idx[i]);
+		kfree(device);
 		dev_err(dev, "Intel(R) I/OAT DMA Engine init failed\n");
-		pci_disable_pcie_error_reporting(pdev);
 		return -ENODEV;
 	}
 
@@ -1376,6 +1402,8 @@ static void ioat_remove(struct pci_dev *pdev)
 	if (!device)
 		return;
 
+	ioat_shutdown(pdev);
+
 	dev_err(&pdev->dev, "Removing dma and dca services\n");
 	if (device->dca) {
 		unregister_dca_provider(device->dca, &pdev->dev);
@@ -1383,7 +1411,6 @@ static void ioat_remove(struct pci_dev *pdev)
 		device->dca = NULL;
 	}
 
-	pci_disable_pcie_error_reporting(pdev);
 	ioat_dma_remove(device);
 }
 
@@ -1422,6 +1449,7 @@ module_init(ioat_init_module);
 static void __exit ioat_exit_module(void)
 {
 	pci_unregister_driver(&ioat_pci_driver);
+	kmem_cache_destroy(ioat_sed_cache);
 	kmem_cache_destroy(ioat_cache);
 }
 module_exit(ioat_exit_module);
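
A note on the tasklet conversion above: tasklet_setup() is the kernel-wide replacement for tasklet_init(), and its callback receives the tasklet pointer itself instead of an unsigned long cookie. A minimal sketch of the pattern, with a hypothetical demo_chan standing in for struct ioatdma_chan:

#include <linux/interrupt.h>

/* Hypothetical stand-in for struct ioatdma_chan. */
struct demo_chan {
	struct tasklet_struct cleanup_task;
	int pending;
};

/* The modern callback takes the tasklet pointer; the owning structure
 * is recovered with from_tasklet(), a type-safe container_of() wrapper,
 * instead of casting an unsigned long back to a pointer. */
static void demo_cleanup_event(struct tasklet_struct *t)
{
	struct demo_chan *chan = from_tasklet(chan, t, cleanup_task);

	chan->pending = 0;
}

static void demo_chan_init(struct demo_chan *chan)
{
	tasklet_setup(&chan->cleanup_task, demo_cleanup_event);
}

Because from_tasklet() is just container_of() under the hood, the cast through unsigned long disappears along with the type-safety hole it carried.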
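The probe-path DMA mask change follows the same modernization: dma_set_mask_and_coherent() replaces the paired pci_set_dma_mask()/pci_set_consistent_dma_mask() calls and their 32-bit fallback ladder. A minimal sketch, where demo_set_dma_masks() is an illustrative helper rather than part of the driver:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

static int demo_set_dma_masks(struct pci_dev *pdev)
{
	/* Sets both the streaming and coherent DMA masks in one call.
	 * The DMA core now accounts for platform addressing limits
	 * internally, so the explicit retry with DMA_BIT_MASK(32) that
	 * the old code carried is no longer needed. */
	return dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
}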
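Finally, the switch from devm_kzalloc() to kzalloc() plus a device_release callback ties the ioatdma_device lifetime to the dmaengine reference count rather than to driver unbind, which can fire while the dma_device is still referenced. A sketch of the shape, under hypothetical demo_* names:

#include <linux/dmaengine.h>
#include <linux/slab.h>

#define DEMO_MAX_CHANS 4

struct demo_dma_dev {
	struct dma_device dma_dev;
	void *chan[DEMO_MAX_CHANS];
};

/* Invoked by the dmaengine core when the last reference to the
 * dma_device drops; only then is it safe to free the container and
 * its per-channel allocations. */
static void demo_release(struct dma_device *device)
{
	struct demo_dma_dev *d =
		container_of(device, struct demo_dma_dev, dma_dev);
	int i;

	for (i = 0; i < DEMO_MAX_CHANS; i++)
		kfree(d->chan[i]);
	kfree(d);
}

static struct demo_dma_dev *demo_alloc(void)
{
	struct demo_dma_dev *d = kzalloc(sizeof(*d), GFP_KERNEL);

	if (!d)
		return NULL;
	d->dma_dev.device_release = demo_release;
	return d;
}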
