Diffstat (limited to 'drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c')
| -rw-r--r-- | drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c | 580 |
1 file changed, 387 insertions(+), 193 deletions(-)
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
index 4c6c882c6a1c..ed5231dece3f 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -1,292 +1,486 @@
-/*
- * aQuantia Corporation Network Driver
- * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+// SPDX-License-Identifier: GPL-2.0-only
+/* Atlantic Network Driver
  *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
+ * Copyright (C) 2014-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
  */
 
 /* File aq_pci_func.c: Definition of PCI functions. */
 
-#include "aq_pci_func.h"
+#include <linux/interrupt.h>
+#include <linux/module.h>
+
+#include "aq_main.h"
 #include "aq_nic.h"
 #include "aq_vec.h"
 #include "aq_hw.h"
-#include <linux/interrupt.h>
+#include "aq_pci_func.h"
+#include "hw_atl/hw_atl_a0.h"
+#include "hw_atl/hw_atl_b0.h"
+#include "hw_atl2/hw_atl2.h"
+#include "aq_filters.h"
+#include "aq_drvinfo.h"
+#include "aq_macsec.h"
+
+static const struct pci_device_id aq_pci_tbl[] = {
+        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_0001), },
+        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_D100), },
+        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_D107), },
+        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_D108), },
+        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_D109), },
+
+        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC100), },
+        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC107), },
+        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC108), },
+        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC109), },
+        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC111), },
+        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC112), },
+
+        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC100S), },
+        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC107S), },
+        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC108S), },
+        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC109S), },
+        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC111S), },
+        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC112S), },
+
+        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC113DEV), },
+        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC113CS), },
+        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC114CS), },
+        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC113), },
+        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC113C), },
+        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC115C), },
+        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC113CA), },
+        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC116C), },
+
+        {}
+};
+
+static const struct aq_board_revision_s hw_atl_boards[] = {
+        { AQ_DEVICE_ID_0001,    AQ_HWREV_1, &hw_atl_ops_a0, &hw_atl_a0_caps_aqc107, },
+        { AQ_DEVICE_ID_D100,    AQ_HWREV_1, &hw_atl_ops_a0, &hw_atl_a0_caps_aqc100, },
+        { AQ_DEVICE_ID_D107,    AQ_HWREV_1, &hw_atl_ops_a0, &hw_atl_a0_caps_aqc107, },
+        { AQ_DEVICE_ID_D108,    AQ_HWREV_1, &hw_atl_ops_a0, &hw_atl_a0_caps_aqc108, },
+        { AQ_DEVICE_ID_D109,    AQ_HWREV_1, &hw_atl_ops_a0, &hw_atl_a0_caps_aqc109, },
+
+        { AQ_DEVICE_ID_0001,    AQ_HWREV_2, &hw_atl_ops_b0, &hw_atl_b0_caps_aqc107, },
+        { AQ_DEVICE_ID_D100,    AQ_HWREV_2, &hw_atl_ops_b0, &hw_atl_b0_caps_aqc100, },
+        { AQ_DEVICE_ID_D107,    AQ_HWREV_2, &hw_atl_ops_b0, &hw_atl_b0_caps_aqc107, },
+        { AQ_DEVICE_ID_D108,    AQ_HWREV_2, &hw_atl_ops_b0, &hw_atl_b0_caps_aqc108, },
+        { AQ_DEVICE_ID_D109,    AQ_HWREV_2, &hw_atl_ops_b0, &hw_atl_b0_caps_aqc109, },
+
+        { AQ_DEVICE_ID_AQC100,  AQ_HWREV_ANY,   &hw_atl_ops_b1, &hw_atl_b0_caps_aqc100, },
+        { AQ_DEVICE_ID_AQC107,  AQ_HWREV_ANY,   &hw_atl_ops_b1, &hw_atl_b0_caps_aqc107, },
+        { AQ_DEVICE_ID_AQC108,  AQ_HWREV_ANY,   &hw_atl_ops_b1, &hw_atl_b0_caps_aqc108, },
+        { AQ_DEVICE_ID_AQC109,  AQ_HWREV_ANY,   &hw_atl_ops_b1, &hw_atl_b0_caps_aqc109, },
+        { AQ_DEVICE_ID_AQC111,  AQ_HWREV_ANY,   &hw_atl_ops_b1, &hw_atl_b0_caps_aqc111, },
+        { AQ_DEVICE_ID_AQC112,  AQ_HWREV_ANY,   &hw_atl_ops_b1, &hw_atl_b0_caps_aqc112, },
+
+        { AQ_DEVICE_ID_AQC100S, AQ_HWREV_ANY,   &hw_atl_ops_b1, &hw_atl_b0_caps_aqc100s, },
+        { AQ_DEVICE_ID_AQC107S, AQ_HWREV_ANY,   &hw_atl_ops_b1, &hw_atl_b0_caps_aqc107s, },
+        { AQ_DEVICE_ID_AQC108S, AQ_HWREV_ANY,   &hw_atl_ops_b1, &hw_atl_b0_caps_aqc108s, },
+        { AQ_DEVICE_ID_AQC109S, AQ_HWREV_ANY,   &hw_atl_ops_b1, &hw_atl_b0_caps_aqc109s, },
+        { AQ_DEVICE_ID_AQC111S, AQ_HWREV_ANY,   &hw_atl_ops_b1, &hw_atl_b0_caps_aqc111s, },
+        { AQ_DEVICE_ID_AQC112S, AQ_HWREV_ANY,   &hw_atl_ops_b1, &hw_atl_b0_caps_aqc112s, },
+
+        { AQ_DEVICE_ID_AQC113DEV,       AQ_HWREV_ANY,   &hw_atl2_ops, &hw_atl2_caps_aqc113, },
+        { AQ_DEVICE_ID_AQC113,          AQ_HWREV_ANY,   &hw_atl2_ops, &hw_atl2_caps_aqc113, },
+        { AQ_DEVICE_ID_AQC113CS,        AQ_HWREV_ANY,   &hw_atl2_ops, &hw_atl2_caps_aqc113, },
+        { AQ_DEVICE_ID_AQC114CS,        AQ_HWREV_ANY,   &hw_atl2_ops, &hw_atl2_caps_aqc113, },
+        { AQ_DEVICE_ID_AQC113C,         AQ_HWREV_ANY,   &hw_atl2_ops, &hw_atl2_caps_aqc113, },
+        { AQ_DEVICE_ID_AQC115C,         AQ_HWREV_ANY,   &hw_atl2_ops, &hw_atl2_caps_aqc115c, },
+        { AQ_DEVICE_ID_AQC113CA,        AQ_HWREV_ANY,   &hw_atl2_ops, &hw_atl2_caps_aqc113, },
+        { AQ_DEVICE_ID_AQC116C,         AQ_HWREV_ANY,   &hw_atl2_ops, &hw_atl2_caps_aqc116c, },
 
-struct aq_pci_func_s {
-        struct pci_dev *pdev;
-        struct aq_nic_s *port[AQ_CFG_PCI_FUNC_PORTS];
-        void __iomem *mmio;
-        void *aq_vec[AQ_CFG_PCI_FUNC_MSIX_IRQS];
-        resource_size_t mmio_pa;
-        unsigned int msix_entry_mask;
-        unsigned int ports;
-        bool is_pci_enabled;
-        bool is_regions;
-        bool is_pci_using_dac;
-        struct aq_hw_caps_s aq_hw_caps;
 };
 
-struct aq_pci_func_s *aq_pci_func_alloc(struct aq_hw_ops *aq_hw_ops,
-                                        struct pci_dev *pdev,
-                                        const struct net_device_ops *ndev_ops,
-                                        const struct ethtool_ops *eth_ops)
+MODULE_DEVICE_TABLE(pci, aq_pci_tbl);
+
+static int aq_pci_probe_get_hw_by_id(struct pci_dev *pdev,
+                                     const struct aq_hw_ops **ops,
+                                     const struct aq_hw_caps_s **caps)
 {
-        struct aq_pci_func_s *self = NULL;
-        int err = 0;
-        unsigned int port = 0U;
+        int i;
 
-        if (!aq_hw_ops) {
-                err = -EFAULT;
-                goto err_exit;
+        if (pdev->vendor != PCI_VENDOR_ID_AQUANTIA)
+                return -EINVAL;
+
+        for (i = 0; i < ARRAY_SIZE(hw_atl_boards); i++) {
+                if (hw_atl_boards[i].devid == pdev->device &&
+                    (hw_atl_boards[i].revision == AQ_HWREV_ANY ||
+                     hw_atl_boards[i].revision == pdev->revision)) {
+                        *ops = hw_atl_boards[i].ops;
+                        *caps = hw_atl_boards[i].caps;
+                        break;
+                }
         }
-        self = kzalloc(sizeof(*self), GFP_KERNEL);
-        if (!self) {
-                err = -ENOMEM;
+
+        if (i == ARRAY_SIZE(hw_atl_boards))
+                return -EINVAL;
+
+        return 0;
+}
+
+static int aq_pci_func_init(struct pci_dev *pdev)
+{
+        int err;
+
+        err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+        if (err)
+                err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+        if (err) {
+                err = -ENOSR;
                 goto err_exit;
         }
-        pci_set_drvdata(pdev, self);
-        self->pdev = pdev;
-
-        err = aq_hw_ops->get_hw_caps(NULL, &self->aq_hw_caps);
+        err = pci_request_regions(pdev, AQ_CFG_DRV_NAME "_mmio");
         if (err < 0)
                 goto err_exit;
 
-        self->ports = self->aq_hw_caps.ports;
+        pci_set_master(pdev);
 
-        for (port = 0; port < self->ports; ++port) {
-                struct aq_nic_s *aq_nic = aq_nic_alloc_cold(ndev_ops, eth_ops,
-                                                            &pdev->dev, self,
-                                                            port, aq_hw_ops);
+        return 0;
 
-                if (!aq_nic) {
-                        err = -ENOMEM;
-                        goto err_exit;
-                }
-                self->port[port] = aq_nic;
+err_exit:
+        return err;
+}
+
+int aq_pci_func_alloc_irq(struct aq_nic_s *self, unsigned int i,
+                          char *name, irq_handler_t irq_handler,
+                          void *irq_arg, cpumask_t *affinity_mask)
+{
+        struct pci_dev *pdev = self->pdev;
+        int err;
+
+        if (pdev->msix_enabled || pdev->msi_enabled)
+                err = request_irq(pci_irq_vector(pdev, i), irq_handler, 0,
+                                  name, irq_arg);
+        else
+                err = request_irq(pci_irq_vector(pdev, i), aq_vec_isr_legacy,
+                                  IRQF_SHARED, name, irq_arg);
+
+        if (err >= 0) {
+                self->msix_entry_mask |= (1 << i);
+
+                if (pdev->msix_enabled && affinity_mask)
+                        irq_update_affinity_hint(pci_irq_vector(pdev, i),
+                                                 affinity_mask);
         }
 
-err_exit:
-        if (err < 0) {
-                if (self)
-                        aq_pci_func_free(self);
-                self = NULL;
+        return err;
+}
+
+void aq_pci_func_free_irqs(struct aq_nic_s *self)
+{
+        struct pci_dev *pdev = self->pdev;
+        unsigned int i;
+        void *irq_data;
+
+        for (i = 32U; i--;) {
+                if (!((1U << i) & self->msix_entry_mask))
+                        continue;
+                if (self->aq_nic_cfg.link_irq_vec &&
+                    i == self->aq_nic_cfg.link_irq_vec)
+                        irq_data = self;
+                else if (i < AQ_CFG_VECS_MAX)
+                        irq_data = self->aq_vec[i];
+                else
+                        continue;
+
+                if (pdev->msix_enabled)
+                        irq_update_affinity_hint(pci_irq_vector(pdev, i), NULL);
+                free_irq(pci_irq_vector(pdev, i), irq_data);
+                self->msix_entry_mask &= ~(1U << i);
         }
+}
+
+unsigned int aq_pci_func_get_irq_type(struct aq_nic_s *self)
+{
+        if (self->pdev->msix_enabled)
+                return AQ_HW_IRQ_MSIX;
+        if (self->pdev->msi_enabled)
+                return AQ_HW_IRQ_MSI;
 
-        (void)err;
-        return self;
+        return AQ_HW_IRQ_INTX;
 }
 
-int aq_pci_func_init(struct aq_pci_func_s *self)
+static void aq_pci_free_irq_vectors(struct aq_nic_s *self)
 {
-        int err = 0;
-        unsigned int bar = 0U;
-        unsigned int port = 0U;
+        pci_free_irq_vectors(self->pdev);
+}
 
-        err = pci_enable_device(self->pdev);
-        if (err < 0)
-                goto err_exit;
+static int aq_pci_probe(struct pci_dev *pdev,
+                        const struct pci_device_id *pci_id)
+{
+        struct net_device *ndev;
+        resource_size_t mmio_pa;
+        struct aq_nic_s *self;
+        u32 numvecs;
+        u32 bar;
+        int err;
 
-        self->is_pci_enabled = true;
+        err = pci_enable_device(pdev);
+        if (err)
+                return err;
 
-        err = pci_set_dma_mask(self->pdev, DMA_BIT_MASK(64));
-        if (!err) {
-                err = pci_set_consistent_dma_mask(self->pdev, DMA_BIT_MASK(64));
-                self->is_pci_using_dac = 1;
-        }
-        if (err) {
-                err = pci_set_dma_mask(self->pdev, DMA_BIT_MASK(32));
-                if (!err)
-                        err = pci_set_consistent_dma_mask(self->pdev,
-                                                          DMA_BIT_MASK(32));
-                self->is_pci_using_dac = 0;
-        }
-        if (err != 0) {
-                err = -ENOSR;
-                goto err_exit;
+        err = aq_pci_func_init(pdev);
+        if (err)
+                goto err_pci_func;
+
+        ndev = aq_ndev_alloc();
+        if (!ndev) {
+                err = -ENOMEM;
+                goto err_ndev;
         }
 
-        err = pci_request_regions(self->pdev, AQ_CFG_DRV_NAME "_mmio");
-        if (err < 0)
-                goto err_exit;
+        self = netdev_priv(ndev);
+        self->pdev = pdev;
+        SET_NETDEV_DEV(ndev, &pdev->dev);
+        pci_set_drvdata(pdev, self);
+
+        mutex_init(&self->fwreq_mutex);
+
+        err = aq_pci_probe_get_hw_by_id(pdev, &self->aq_hw_ops,
+                                        &aq_nic_get_cfg(self)->aq_hw_caps);
+        if (err)
+                goto err_ioremap;
 
-        self->is_regions = true;
+        self->aq_hw = kzalloc(sizeof(*self->aq_hw), GFP_KERNEL);
+        if (!self->aq_hw) {
+                err = -ENOMEM;
+                goto err_ioremap;
+        }
+        self->aq_hw->aq_nic_cfg = aq_nic_get_cfg(self);
+        if (self->aq_hw->aq_nic_cfg->aq_hw_caps->priv_data_len) {
+                int len = self->aq_hw->aq_nic_cfg->aq_hw_caps->priv_data_len;
 
-        pci_set_master(self->pdev);
+                self->aq_hw->priv = kzalloc(len, GFP_KERNEL);
+                if (!self->aq_hw->priv) {
+                        err = -ENOMEM;
+                        goto err_free_aq_hw;
+                }
+        }
 
         for (bar = 0; bar < 4; ++bar) {
-                if (IORESOURCE_MEM & pci_resource_flags(self->pdev, bar)) {
+                if (IORESOURCE_MEM & pci_resource_flags(pdev, bar)) {
                         resource_size_t reg_sz;
 
-                        self->mmio_pa = pci_resource_start(self->pdev, bar);
-                        if (self->mmio_pa == 0U) {
+                        mmio_pa = pci_resource_start(pdev, bar);
+                        if (mmio_pa == 0U) {
                                 err = -EIO;
-                                goto err_exit;
+                                goto err_free_aq_hw_priv;
                         }
 
-                        reg_sz = pci_resource_len(self->pdev, bar);
+                        reg_sz = pci_resource_len(pdev, bar);
                         if ((reg_sz <= 24 /*ATL_REGS_SIZE*/)) {
                                 err = -EIO;
-                                goto err_exit;
+                                goto err_free_aq_hw_priv;
                         }
 
-                        self->mmio = ioremap_nocache(self->mmio_pa, reg_sz);
-                        if (!self->mmio) {
+                        self->aq_hw->mmio = ioremap(mmio_pa, reg_sz);
+                        if (!self->aq_hw->mmio) {
                                 err = -EIO;
-                                goto err_exit;
+                                goto err_free_aq_hw_priv;
                         }
                         break;
                 }
         }
 
-        /*enable interrupts */
-#if !AQ_CFG_FORCE_LEGACY_INT
-        err = pci_alloc_irq_vectors(self->pdev, self->aq_hw_caps.msix_irqs,
-                                    self->aq_hw_caps.msix_irqs, PCI_IRQ_MSIX);
-
-        if (err < 0) {
-                err = pci_alloc_irq_vectors(self->pdev, 1, 1,
-                                            PCI_IRQ_MSI | PCI_IRQ_LEGACY);
-                if (err < 0)
-                        goto err_exit;
+        if (bar == 4) {
+                err = -EIO;
+                goto err_free_aq_hw_priv;
         }
-#endif
 
-        /* net device init */
-        for (port = 0; port < self->ports; ++port) {
-                if (!self->port[port])
-                        continue;
+        numvecs = min((u8)AQ_CFG_VECS_DEF,
+                      aq_nic_get_cfg(self)->aq_hw_caps->msix_irqs);
+        numvecs = min(numvecs, num_online_cpus());
+        /* Request IRQ vector for PTP */
+        numvecs += 1;
 
-                err = aq_nic_cfg_start(self->port[port]);
-                if (err < 0)
-                        goto err_exit;
+        numvecs += AQ_HW_SERVICE_IRQS;
+        /*enable interrupts */
+#if !AQ_CFG_FORCE_INTX
+        err = pci_alloc_irq_vectors(self->pdev, 1, numvecs, PCI_IRQ_ALL_TYPES);
+        if (err < 0)
+                goto err_hwinit;
+        numvecs = err;
+#endif
+        self->irqvecs = numvecs;
 
-                err = aq_nic_ndev_init(self->port[port]);
-                if (err < 0)
-                        goto err_exit;
+        /* net device init */
+        aq_nic_cfg_start(self);
 
-                err = aq_nic_ndev_register(self->port[port]);
-                if (err < 0)
-                        goto err_exit;
-        }
+        aq_nic_ndev_init(self);
 
-err_exit:
+        err = aq_nic_ndev_register(self);
         if (err < 0)
-                aq_pci_func_deinit(self);
+                goto err_register;
+
+        aq_drvinfo_init(ndev);
+
+        return 0;
+
+err_register:
+        aq_nic_free_vectors(self);
+        aq_pci_free_irq_vectors(self);
+err_hwinit:
+        iounmap(self->aq_hw->mmio);
+err_free_aq_hw_priv:
+        kfree(self->aq_hw->priv);
+err_free_aq_hw:
+        kfree(self->aq_hw);
+err_ioremap:
+        free_netdev(ndev);
+err_ndev:
+        pci_release_regions(pdev);
+err_pci_func:
+        pci_disable_device(pdev);
+
         return err;
 }
 
-int aq_pci_func_alloc_irq(struct aq_pci_func_s *self, unsigned int i,
-                          char *name, void *aq_vec, cpumask_t *affinity_mask)
+static void aq_pci_remove(struct pci_dev *pdev)
 {
-        struct pci_dev *pdev = self->pdev;
-        int err = 0;
+        struct aq_nic_s *self = pci_get_drvdata(pdev);
 
-        if (pdev->msix_enabled || pdev->msi_enabled)
-                err = request_irq(pci_irq_vector(pdev, i), aq_vec_isr, 0,
-                                  name, aq_vec);
-        else
-                err = request_irq(pci_irq_vector(pdev, i), aq_vec_isr_legacy,
-                                  IRQF_SHARED, name, aq_vec);
-
-        if (err >= 0) {
-                self->msix_entry_mask |= (1 << i);
-                self->aq_vec[i] = aq_vec;
+        if (self->ndev) {
+                aq_clear_rxnfc_all_rules(self);
+                if (self->ndev->reg_state == NETREG_REGISTERED)
+                        unregister_netdev(self->ndev);
 
-                if (pdev->msix_enabled)
-                        irq_set_affinity_hint(pci_irq_vector(pdev, i),
-                                              affinity_mask);
+#if IS_ENABLED(CONFIG_MACSEC)
+                aq_macsec_free(self);
+#endif
+                aq_nic_free_vectors(self);
+                aq_pci_free_irq_vectors(self);
+                iounmap(self->aq_hw->mmio);
+                kfree(self->aq_hw->priv);
+                kfree(self->aq_hw);
+                pci_release_regions(pdev);
+                free_netdev(self->ndev);
         }
 
-        return err;
+        pci_disable_device(pdev);
 }
 
-void aq_pci_func_free_irqs(struct aq_pci_func_s *self)
+static void aq_pci_shutdown(struct pci_dev *pdev)
 {
-        struct pci_dev *pdev = self->pdev;
-        unsigned int i = 0U;
+        struct aq_nic_s *self = pci_get_drvdata(pdev);
 
-        for (i = 32U; i--;) {
-                if (!((1U << i) & self->msix_entry_mask))
-                        continue;
+        aq_nic_shutdown(self);
 
-                if (pdev->msix_enabled)
-                        irq_set_affinity_hint(pci_irq_vector(pdev, i), NULL);
-                free_irq(pci_irq_vector(pdev, i), self->aq_vec[i]);
-                self->msix_entry_mask &= ~(1U << i);
+        pci_disable_device(pdev);
+
+        if (system_state == SYSTEM_POWER_OFF) {
+                pci_wake_from_d3(pdev, false);
+                pci_set_power_state(pdev, PCI_D3hot);
         }
 }
 
-void __iomem *aq_pci_func_get_mmio(struct aq_pci_func_s *self)
+#ifdef CONFIG_PM
+static int aq_suspend_common(struct device *dev)
 {
-        return self->mmio;
-}
+        struct aq_nic_s *nic = pci_get_drvdata(to_pci_dev(dev));
 
-unsigned int aq_pci_func_get_irq_type(struct aq_pci_func_s *self)
-{
-        if (self->pdev->msix_enabled)
-                return AQ_HW_IRQ_MSIX;
-        if (self->pdev->msi_enabled)
-                return AQ_HW_IRQ_MSIX;
-        return AQ_HW_IRQ_LEGACY;
-}
+        rtnl_lock();
 
-void aq_pci_func_deinit(struct aq_pci_func_s *self)
-{
-        if (!self)
-                goto err_exit;
+        nic->power_state = AQ_HW_POWER_STATE_D3;
+        netif_device_detach(nic->ndev);
+        netif_tx_stop_all_queues(nic->ndev);
 
-        aq_pci_func_free_irqs(self);
-        pci_free_irq_vectors(self->pdev);
+        if (netif_running(nic->ndev))
+                aq_nic_stop(nic);
 
-        if (self->is_regions)
-                pci_release_regions(self->pdev);
+        aq_nic_deinit(nic, !nic->aq_hw->aq_nic_cfg->wol);
+        aq_nic_set_power(nic);
 
-        if (self->is_pci_enabled)
-                pci_disable_device(self->pdev);
+        rtnl_unlock();
 
-err_exit:;
+        return 0;
 }
 
-void aq_pci_func_free(struct aq_pci_func_s *self)
+static int atl_resume_common(struct device *dev)
 {
-        unsigned int port = 0U;
+        struct pci_dev *pdev = to_pci_dev(dev);
+        struct aq_nic_s *nic;
+        int ret = 0;
 
-        if (!self)
-                goto err_exit;
+        nic = pci_get_drvdata(pdev);
 
-        for (port = 0; port < self->ports; ++port) {
-                if (!self->port[port])
-                        continue;
+        rtnl_lock();
+
+        pci_set_power_state(pdev, PCI_D0);
+        pci_restore_state(pdev);
+
+        if (netif_running(nic->ndev)) {
+                ret = aq_nic_init(nic);
+                if (ret)
+                        goto err_exit;
 
-                aq_nic_ndev_free(self->port[port]);
+                ret = aq_nic_start(nic);
+                if (ret)
+                        goto err_exit;
         }
 
-        kfree(self);
+        netif_device_attach(nic->ndev);
+        netif_tx_start_all_queues(nic->ndev);
+
+err_exit:
+        if (ret < 0)
+                aq_nic_deinit(nic, true);
+
+        rtnl_unlock();
 
-err_exit:;
+        return ret;
 }
 
-int aq_pci_func_change_pm_state(struct aq_pci_func_s *self,
-                                pm_message_t *pm_msg)
+static int aq_pm_freeze(struct device *dev)
 {
-        int err = 0;
-        unsigned int port = 0U;
+        return aq_suspend_common(dev);
+}
 
-        if (!self) {
-                err = -EFAULT;
-                goto err_exit;
-        }
-        for (port = 0; port < self->ports; ++port) {
-                if (!self->port[port])
-                        continue;
+static int aq_pm_suspend_poweroff(struct device *dev)
+{
+        return aq_suspend_common(dev);
+}
 
-                (void)aq_nic_change_pm_state(self->port[port], pm_msg);
-        }
+static int aq_pm_thaw(struct device *dev)
+{
+        return atl_resume_common(dev);
+}
 
-err_exit:
-        return err;
+static int aq_pm_resume_restore(struct device *dev)
+{
+        return atl_resume_common(dev);
+}
+
+static const struct dev_pm_ops aq_pm_ops = {
+        .suspend = aq_pm_suspend_poweroff,
+        .poweroff = aq_pm_suspend_poweroff,
+        .freeze = aq_pm_freeze,
+        .resume = aq_pm_resume_restore,
+        .restore = aq_pm_resume_restore,
+        .thaw = aq_pm_thaw,
+};
+#endif
+
+static struct pci_driver aq_pci_driver = {
+        .name = AQ_CFG_DRV_NAME,
+        .id_table = aq_pci_tbl,
+        .probe = aq_pci_probe,
+        .remove = aq_pci_remove,
+        .shutdown = aq_pci_shutdown,
+#ifdef CONFIG_PM
+        .driver.pm = &aq_pm_ops,
+#endif
+};
+
+int aq_pci_func_register_driver(void)
+{
+        return pci_register_driver(&aq_pci_driver);
 }
+
+void aq_pci_func_unregister_driver(void)
+{
+        pci_unregister_driver(&aq_pci_driver);
+}
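
Editor's note: the core interrupt change in this diff is the move from the old "try MSI-X, else fall back to MSI/legacy" code to a single pci_alloc_irq_vectors(..., PCI_IRQ_ALL_TYPES) call whose vectors are later resolved with pci_irq_vector(). The snippet below is a minimal, standalone sketch of that allocation/teardown pattern under stated assumptions; demo_irq_handler(), demo_setup_irqs() and the "demo" IRQ name are hypothetical and are not part of the patch or of the atlantic driver.

/* Sketch only: allocate up to "nvecs" vectors, preferring MSI-X, then MSI,
 * then INTx, and hook one handler per granted vector.
 */
#include <linux/interrupt.h>
#include <linux/pci.h>

static irqreturn_t demo_irq_handler(int irq, void *data)
{
        /* Acknowledge and handle the device interrupt here. */
        return IRQ_HANDLED;
}

static int demo_setup_irqs(struct pci_dev *pdev, unsigned int nvecs, void *ctx)
{
        int allocated, i, err;

        /* One call covers MSI-X, MSI and INTx; it returns the number of
         * vectors actually granted (>= 1) or a negative errno.
         */
        allocated = pci_alloc_irq_vectors(pdev, 1, nvecs, PCI_IRQ_ALL_TYPES);
        if (allocated < 0)
                return allocated;

        for (i = 0; i < allocated; i++) {
                /* pci_irq_vector() maps a vector index to a Linux IRQ number
                 * regardless of which interrupt type was granted.
                 */
                err = request_irq(pci_irq_vector(pdev, i), demo_irq_handler,
                                  pdev->msix_enabled ? 0 : IRQF_SHARED,
                                  "demo", ctx);
                if (err) {
                        while (--i >= 0)
                                free_irq(pci_irq_vector(pdev, i), ctx);
                        pci_free_irq_vectors(pdev);
                        return err;
                }
        }

        return allocated;
}

Teardown mirrors the setup: free_irq() each granted vector, then pci_free_irq_vectors(), which is what aq_pci_func_free_irqs() and aq_pci_free_irq_vectors() do in the patched driver.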
