Diffstat (limited to 'drivers/net/can/kvaser_pciefd.c')
-rw-r--r-- | drivers/net/can/kvaser_pciefd.c | 299
1 file changed, 189 insertions(+), 110 deletions(-)
diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c
index a57005faa04f..fa04a7ced02b 100644
--- a/drivers/net/can/kvaser_pciefd.c
+++ b/drivers/net/can/kvaser_pciefd.c
@@ -27,12 +27,12 @@ MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices");
 #define KVASER_PCIEFD_BEC_POLL_FREQ (jiffies + msecs_to_jiffies(200))
 #define KVASER_PCIEFD_MAX_ERR_REP 256U
 #define KVASER_PCIEFD_CAN_TX_MAX_COUNT 17U
-#define KVASER_PCIEFD_MAX_CAN_CHANNELS 4UL
+#define KVASER_PCIEFD_MAX_CAN_CHANNELS 8UL
 #define KVASER_PCIEFD_DMA_COUNT 2U
-
 #define KVASER_PCIEFD_DMA_SIZE (4U * 1024U)
 
 #define KVASER_PCIEFD_VENDOR 0x1a07
+
 /* Altera based devices */
 #define KVASER_PCIEFD_4HS_DEVICE_ID 0x000d
 #define KVASER_PCIEFD_2HS_V2_DEVICE_ID 0x000e
@@ -47,12 +47,19 @@ MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices");
 #define KVASER_PCIEFD_MINIPCIE_2CAN_V3_DEVICE_ID 0x0015
 #define KVASER_PCIEFD_MINIPCIE_1CAN_V3_DEVICE_ID 0x0016
 
+/* Xilinx based devices */
+#define KVASER_PCIEFD_M2_4CAN_DEVICE_ID 0x0017
+#define KVASER_PCIEFD_8CAN_DEVICE_ID 0x0019
+
 /* Altera SerDes Enable 64-bit DMA address translation */
 #define KVASER_PCIEFD_ALTERA_DMA_64BIT BIT(0)
 
 /* SmartFusion2 SerDes LSB address translation mask */
 #define KVASER_PCIEFD_SF2_DMA_LSB_MASK GENMASK(31, 12)
 
+/* Xilinx SerDes LSB address translation mask */
+#define KVASER_PCIEFD_XILINX_DMA_LSB_MASK GENMASK(31, 12)
+
 /* Kvaser KCAN CAN controller registers */
 #define KVASER_PCIEFD_KCAN_FIFO_REG 0x100
 #define KVASER_PCIEFD_KCAN_FIFO_LAST_REG 0x180
@@ -281,6 +288,8 @@ static void kvaser_pciefd_write_dma_map_altera(struct kvaser_pciefd *pcie,
 					       dma_addr_t addr, int index);
 static void kvaser_pciefd_write_dma_map_sf2(struct kvaser_pciefd *pcie,
 					    dma_addr_t addr, int index);
+static void kvaser_pciefd_write_dma_map_xilinx(struct kvaser_pciefd *pcie,
+					       dma_addr_t addr, int index);
 
 struct kvaser_pciefd_address_offset {
 	u32 serdes;
@@ -335,6 +344,18 @@ static const struct kvaser_pciefd_address_offset kvaser_pciefd_sf2_address_offse
 	.kcan_ch1 = 0x142000,
 };
 
+static const struct kvaser_pciefd_address_offset kvaser_pciefd_xilinx_address_offset = {
+	.serdes = 0x00208,
+	.pci_ien = 0x102004,
+	.pci_irq = 0x102008,
+	.sysid = 0x100000,
+	.loopback = 0x103000,
+	.kcan_srb_fifo = 0x120000,
+	.kcan_srb = 0x121000,
+	.kcan_ch0 = 0x140000,
+	.kcan_ch1 = 0x142000,
+};
+
 static const struct kvaser_pciefd_irq_mask kvaser_pciefd_altera_irq_mask = {
 	.kcan_rx0 = BIT(4),
 	.kcan_tx = { BIT(0), BIT(1), BIT(2), BIT(3) },
@@ -347,6 +368,12 @@ static const struct kvaser_pciefd_irq_mask kvaser_pciefd_sf2_irq_mask = {
 	.all = GENMASK(19, 16) | BIT(4),
 };
 
+static const struct kvaser_pciefd_irq_mask kvaser_pciefd_xilinx_irq_mask = {
+	.kcan_rx0 = BIT(4),
+	.kcan_tx = { BIT(16), BIT(17), BIT(18), BIT(19), BIT(20), BIT(21), BIT(22), BIT(23) },
+	.all = GENMASK(23, 16) | BIT(4),
+};
+
 static const struct kvaser_pciefd_dev_ops kvaser_pciefd_altera_dev_ops = {
 	.kvaser_pciefd_write_dma_map = kvaser_pciefd_write_dma_map_altera,
 };
@@ -355,6 +382,10 @@ static const struct kvaser_pciefd_dev_ops kvaser_pciefd_sf2_dev_ops = {
 	.kvaser_pciefd_write_dma_map = kvaser_pciefd_write_dma_map_sf2,
 };
 
+static const struct kvaser_pciefd_dev_ops kvaser_pciefd_xilinx_dev_ops = {
+	.kvaser_pciefd_write_dma_map = kvaser_pciefd_write_dma_map_xilinx,
+};
+
 static const struct kvaser_pciefd_driver_data kvaser_pciefd_altera_driver_data = {
 	.address_offset = &kvaser_pciefd_altera_address_offset,
 	.irq_mask = &kvaser_pciefd_altera_irq_mask,
@@ -367,6 +398,12 @@ static const struct kvaser_pciefd_driver_data kvaser_pciefd_sf2_driver_data = {
 	.ops = &kvaser_pciefd_sf2_dev_ops,
 };
 
+static const struct kvaser_pciefd_driver_data kvaser_pciefd_xilinx_driver_data = {
+	.address_offset = &kvaser_pciefd_xilinx_address_offset,
+	.irq_mask = &kvaser_pciefd_xilinx_irq_mask,
+	.ops = &kvaser_pciefd_xilinx_dev_ops,
+};
+
 struct kvaser_pciefd_can {
 	struct can_priv can;
 	struct kvaser_pciefd *kv_pcie;
@@ -457,6 +494,14 @@ static struct pci_device_id kvaser_pciefd_id_table[] = {
 		.driver_data = (kernel_ulong_t)&kvaser_pciefd_sf2_driver_data,
 	},
 	{
+		PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_M2_4CAN_DEVICE_ID),
+		.driver_data = (kernel_ulong_t)&kvaser_pciefd_xilinx_driver_data,
+	},
+	{
+		PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_8CAN_DEVICE_ID),
+		.driver_data = (kernel_ulong_t)&kvaser_pciefd_xilinx_driver_data,
+	},
+	{
 		0,
 	},
 };
@@ -505,7 +550,7 @@ static void kvaser_pciefd_disable_err_gen(struct kvaser_pciefd_can *can)
 	spin_unlock_irqrestore(&can->lock, irq);
 }
 
-static void kvaser_pciefd_set_tx_irq(struct kvaser_pciefd_can *can)
+static inline void kvaser_pciefd_set_tx_irq(struct kvaser_pciefd_can *can)
 {
 	u32 msk;
 
@@ -666,17 +711,17 @@ static void kvaser_pciefd_pwm_start(struct kvaser_pciefd_can *can)
 
 static int kvaser_pciefd_open(struct net_device *netdev)
 {
-	int err;
+	int ret;
 	struct kvaser_pciefd_can *can = netdev_priv(netdev);
 
-	err = open_candev(netdev);
-	if (err)
-		return err;
+	ret = open_candev(netdev);
+	if (ret)
+		return ret;
 
-	err = kvaser_pciefd_bus_on(can);
-	if (err) {
+	ret = kvaser_pciefd_bus_on(can);
+	if (ret) {
 		close_candev(netdev);
-		return err;
+		return ret;
 	}
 
 	return 0;
@@ -954,7 +999,8 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
 		can->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY |
 					      CAN_CTRLMODE_FD |
 					      CAN_CTRLMODE_FD_NON_ISO |
-					      CAN_CTRLMODE_CC_LEN8_DLC;
+					      CAN_CTRLMODE_CC_LEN8_DLC |
+					      CAN_CTRLMODE_BERR_REPORTING;
 
 		status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG);
 		if (!(status & KVASER_PCIEFD_KCAN_STAT_FD)) {
@@ -987,15 +1033,15 @@ static int kvaser_pciefd_reg_candev(struct kvaser_pciefd *pcie)
 	int i;
 
 	for (i = 0; i < pcie->nr_channels; i++) {
-		int err = register_candev(pcie->can[i]->can.dev);
+		int ret = register_candev(pcie->can[i]->can.dev);
 
-		if (err) {
+		if (ret) {
 			int j;
 
 			/* Unregister all successfully registered devices. */
 			for (j = 0; j < i; j++)
 				unregister_candev(pcie->can[j]->can.dev);
 
-			return err;
+			return ret;
 		}
 	}
@@ -1008,13 +1054,13 @@ static void kvaser_pciefd_write_dma_map_altera(struct kvaser_pciefd *pcie,
 	void __iomem *serdes_base;
 	u32 word1, word2;
 
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-	word1 = addr | KVASER_PCIEFD_ALTERA_DMA_64BIT;
-	word2 = addr >> 32;
-#else
-	word1 = addr;
-	word2 = 0;
-#endif
+	if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT)) {
+		word1 = lower_32_bits(addr) | KVASER_PCIEFD_ALTERA_DMA_64BIT;
+		word2 = upper_32_bits(addr);
+	} else {
+		word1 = addr;
+		word2 = 0;
+	}
 	serdes_base = KVASER_PCIEFD_SERDES_ADDR(pcie) + 0x8 * index;
 	iowrite32(word1, serdes_base);
 	iowrite32(word2, serdes_base + 0x4);
@@ -1027,14 +1073,29 @@ static void kvaser_pciefd_write_dma_map_sf2(struct kvaser_pciefd *pcie,
 	u32 lsb = addr & KVASER_PCIEFD_SF2_DMA_LSB_MASK;
 	u32 msb = 0x0;
 
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-	msb = addr >> 32;
-#endif
+	if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
+		msb = upper_32_bits(addr);
+
 	serdes_base = KVASER_PCIEFD_SERDES_ADDR(pcie) + 0x10 * index;
 	iowrite32(lsb, serdes_base);
 	iowrite32(msb, serdes_base + 0x4);
 }
 
+static void kvaser_pciefd_write_dma_map_xilinx(struct kvaser_pciefd *pcie,
+					       dma_addr_t addr, int index)
+{
+	void __iomem *serdes_base;
+	u32 lsb = addr & KVASER_PCIEFD_XILINX_DMA_LSB_MASK;
+	u32 msb = 0x0;
+
+	if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
+		msb = upper_32_bits(addr);
+
+	serdes_base = KVASER_PCIEFD_SERDES_ADDR(pcie) + 0x8 * index;
+	iowrite32(msb, serdes_base);
+	iowrite32(lsb, serdes_base + 0x4);
+}
+
 static int kvaser_pciefd_setup_dma(struct kvaser_pciefd *pcie)
 {
 	int i;
@@ -1044,6 +1105,9 @@ static int kvaser_pciefd_setup_dma(struct kvaser_pciefd *pcie)
 
 	/* Disable the DMA */
 	iowrite32(0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CTRL_REG);
+
+	dma_set_mask_and_coherent(&pcie->pci->dev, DMA_BIT_MASK(64));
+
 	for (i = 0; i < KVASER_PCIEFD_DMA_COUNT; i++) {
 		pcie->dma_data[i] = dmam_alloc_coherent(&pcie->pci->dev,
 							KVASER_PCIEFD_DMA_SIZE,
@@ -1171,11 +1235,15 @@ static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie,
 }
 
 static void kvaser_pciefd_change_state(struct kvaser_pciefd_can *can,
+				       const struct can_berr_counter *bec,
 				       struct can_frame *cf,
 				       enum can_state new_state,
 				       enum can_state tx_state,
 				       enum can_state rx_state)
 {
+	enum can_state old_state;
+
+	old_state = can->can.state;
 	can_change_state(can->can.dev, cf, tx_state, rx_state);
 
 	if (new_state == CAN_STATE_BUS_OFF) {
@@ -1191,6 +1259,18 @@ static void kvaser_pciefd_change_state(struct kvaser_pciefd_can *can,
 			can_bus_off(ndev);
 		}
 	}
+	if (old_state == CAN_STATE_BUS_OFF &&
+	    new_state == CAN_STATE_ERROR_ACTIVE &&
+	    can->can.restart_ms) {
+		can->can.can_stats.restarts++;
+		if (cf)
+			cf->can_id |= CAN_ERR_RESTARTED;
+	}
+	if (cf && new_state != CAN_STATE_BUS_OFF) {
+		cf->can_id |= CAN_ERR_CNT;
+		cf->data[6] = bec->txerr;
+		cf->data[7] = bec->rxerr;
+	}
 }
 
 static void kvaser_pciefd_packet_to_state(struct kvaser_pciefd_rx_packet *p,
@@ -1225,7 +1305,7 @@ static int kvaser_pciefd_rx_error_frame(struct kvaser_pciefd_can *can,
 	struct can_berr_counter bec;
 	enum can_state old_state, new_state, tx_state, rx_state;
 	struct net_device *ndev = can->can.dev;
-	struct sk_buff *skb;
+	struct sk_buff *skb = NULL;
 	struct can_frame *cf = NULL;
 
 	old_state = can->can.state;
@@ -1234,16 +1314,10 @@ static int kvaser_pciefd_rx_error_frame(struct kvaser_pciefd_can *can,
 	bec.rxerr = FIELD_GET(KVASER_PCIEFD_SPACK_RXERR_MASK, p->header[0]);
 	kvaser_pciefd_packet_to_state(p, &bec, &new_state, &tx_state,
 				      &rx_state);
-	skb = alloc_can_err_skb(ndev, &cf);
+	if (can->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
+		skb = alloc_can_err_skb(ndev, &cf);
 	if (new_state != old_state) {
-		kvaser_pciefd_change_state(can, cf, new_state, tx_state, rx_state);
-		if (old_state == CAN_STATE_BUS_OFF &&
-		    new_state == CAN_STATE_ERROR_ACTIVE &&
-		    can->can.restart_ms) {
-			can->can.can_stats.restarts++;
-			if (skb)
-				cf->can_id |= CAN_ERR_RESTARTED;
-		}
+		kvaser_pciefd_change_state(can, &bec, cf, new_state, tx_state, rx_state);
 	}
 
 	can->err_rep_cnt++;
@@ -1256,18 +1330,19 @@ static int kvaser_pciefd_rx_error_frame(struct kvaser_pciefd_can *can,
 	can->bec.txerr = bec.txerr;
 	can->bec.rxerr = bec.rxerr;
 
-	if (!skb) {
-		ndev->stats.rx_dropped++;
-		return -ENOMEM;
+	if (can->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) {
+		if (!skb) {
+			netdev_warn(ndev, "No memory left for err_skb\n");
+			ndev->stats.rx_dropped++;
+			return -ENOMEM;
+		}
+		kvaser_pciefd_set_skb_timestamp(can->kv_pcie, skb, p->timestamp);
+		cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_CNT;
+		cf->data[6] = bec.txerr;
+		cf->data[7] = bec.rxerr;
+		netif_rx(skb);
 	}
 
-	kvaser_pciefd_set_skb_timestamp(can->kv_pcie, skb, p->timestamp);
-	cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_CNT;
-	cf->data[6] = bec.txerr;
-	cf->data[7] = bec.rxerr;
-
-	netif_rx(skb);
-
 	return 0;
 }
 
@@ -1296,6 +1371,7 @@ static int kvaser_pciefd_handle_status_resp(struct kvaser_pciefd_can *can,
 {
 	struct can_berr_counter bec;
 	enum can_state old_state, new_state, tx_state, rx_state;
+	int ret = 0;
 
 	old_state = can->can.state;
 
@@ -1309,25 +1385,15 @@ static int kvaser_pciefd_handle_status_resp(struct kvaser_pciefd_can *can,
 		struct can_frame *cf;
 
 		skb = alloc_can_err_skb(ndev, &cf);
-		if (!skb) {
+		kvaser_pciefd_change_state(can, &bec, cf, new_state, tx_state, rx_state);
+		if (skb) {
+			kvaser_pciefd_set_skb_timestamp(can->kv_pcie, skb, p->timestamp);
+			netif_rx(skb);
+		} else {
 			ndev->stats.rx_dropped++;
-			return -ENOMEM;
-		}
-
-		kvaser_pciefd_change_state(can, cf, new_state, tx_state, rx_state);
-		if (old_state == CAN_STATE_BUS_OFF &&
-		    new_state == CAN_STATE_ERROR_ACTIVE &&
-		    can->can.restart_ms) {
-			can->can.can_stats.restarts++;
-			cf->can_id |= CAN_ERR_RESTARTED;
+			netdev_warn(ndev, "No memory left for err_skb\n");
+			ret = -ENOMEM;
 		}
-
-		kvaser_pciefd_set_skb_timestamp(can->kv_pcie, skb, p->timestamp);
-
-		cf->data[6] = bec.txerr;
-		cf->data[7] = bec.rxerr;
-
-		netif_rx(skb);
 	}
 	can->bec.txerr = bec.txerr;
 	can->bec.rxerr = bec.rxerr;
@@ -1335,7 +1401,7 @@ static int kvaser_pciefd_handle_status_resp(struct kvaser_pciefd_can *can,
 	if (bec.txerr || bec.rxerr)
 		mod_timer(&can->bec_poll_timer, KVASER_PCIEFD_BEC_POLL_FREQ);
 
-	return 0;
+	return ret;
 }
 
 static int kvaser_pciefd_handle_status_packet(struct kvaser_pciefd *pcie,
@@ -1559,7 +1625,7 @@ static int kvaser_pciefd_read_packet(struct kvaser_pciefd *pcie, int *start_pos,
 	/* Position does not point to the end of the package,
 	 * corrupted packet size?
 	 */
-	if ((*start_pos + size) != pos)
+	if (unlikely((*start_pos + size) != pos))
 		return -EIO;
 
 	/* Point to the next packet header, if any */
@@ -1580,31 +1646,24 @@ static int kvaser_pciefd_read_buffer(struct kvaser_pciefd *pcie, int dma_buf)
 	return res;
 }
 
-static void kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie)
+static u32 kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie)
 {
 	u32 irq = ioread32(KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG);
 
-	if (irq & KVASER_PCIEFD_SRB_IRQ_DPD0) {
+	if (irq & KVASER_PCIEFD_SRB_IRQ_DPD0)
 		kvaser_pciefd_read_buffer(pcie, 0);
-		/* Reset DMA buffer 0 */
-		iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0,
-			  KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
-	}
 
-	if (irq & KVASER_PCIEFD_SRB_IRQ_DPD1) {
+	if (irq & KVASER_PCIEFD_SRB_IRQ_DPD1)
 		kvaser_pciefd_read_buffer(pcie, 1);
-		/* Reset DMA buffer 1 */
-		iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1,
-			  KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
-	}
 
-	if (irq & KVASER_PCIEFD_SRB_IRQ_DOF0 ||
-	    irq & KVASER_PCIEFD_SRB_IRQ_DOF1 ||
-	    irq & KVASER_PCIEFD_SRB_IRQ_DUF0 ||
-	    irq & KVASER_PCIEFD_SRB_IRQ_DUF1)
+	if (unlikely(irq & KVASER_PCIEFD_SRB_IRQ_DOF0 ||
+		     irq & KVASER_PCIEFD_SRB_IRQ_DOF1 ||
+		     irq & KVASER_PCIEFD_SRB_IRQ_DUF0 ||
+		     irq & KVASER_PCIEFD_SRB_IRQ_DUF1))
 		dev_err(&pcie->pci->dev, "DMA IRQ error 0x%08X\n", irq);
 
 	iowrite32(irq, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG);
+	return irq;
 }
 
 static void kvaser_pciefd_transmit_irq(struct kvaser_pciefd_can *can)
@@ -1631,27 +1690,31 @@ static irqreturn_t kvaser_pciefd_irq_handler(int irq, void *dev)
 {
 	struct kvaser_pciefd *pcie = (struct kvaser_pciefd *)dev;
 	const struct kvaser_pciefd_irq_mask *irq_mask = pcie->driver_data->irq_mask;
-	u32 board_irq = ioread32(KVASER_PCIEFD_PCI_IRQ_ADDR(pcie));
+	u32 pci_irq = ioread32(KVASER_PCIEFD_PCI_IRQ_ADDR(pcie));
+	u32 srb_irq = 0;
+	u32 srb_release = 0;
 	int i;
 
-	if (!(board_irq & irq_mask->all))
+	if (!(pci_irq & irq_mask->all))
 		return IRQ_NONE;
 
-	if (board_irq & irq_mask->kcan_rx0)
-		kvaser_pciefd_receive_irq(pcie);
+	if (pci_irq & irq_mask->kcan_rx0)
+		srb_irq = kvaser_pciefd_receive_irq(pcie);
 
 	for (i = 0; i < pcie->nr_channels; i++) {
-		if (!pcie->can[i]) {
-			dev_err(&pcie->pci->dev,
-				"IRQ mask points to unallocated controller\n");
-			break;
-		}
-
-		/* Check that mask matches channel (i) IRQ mask */
-		if (board_irq & irq_mask->kcan_tx[i])
+		if (pci_irq & irq_mask->kcan_tx[i])
 			kvaser_pciefd_transmit_irq(pcie->can[i]);
 	}
 
+	if (srb_irq & KVASER_PCIEFD_SRB_IRQ_DPD0)
+		srb_release |= KVASER_PCIEFD_SRB_CMD_RDB0;
+
+	if (srb_irq & KVASER_PCIEFD_SRB_IRQ_DPD1)
+		srb_release |= KVASER_PCIEFD_SRB_CMD_RDB1;
+
+	if (srb_release)
+		iowrite32(srb_release, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
+
 	return IRQ_HANDLED;
 }
 
@@ -1673,7 +1736,7 @@ static void kvaser_pciefd_teardown_can_ctrls(struct kvaser_pciefd *pcie)
 static int kvaser_pciefd_probe(struct pci_dev *pdev,
 			       const struct pci_device_id *id)
 {
-	int err;
+	int ret;
 	struct kvaser_pciefd *pcie;
 	const struct kvaser_pciefd_irq_mask *irq_mask;
 	void __iomem *irq_en_base;
@@ -1687,39 +1750,52 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev,
 	pcie->driver_data = (const struct kvaser_pciefd_driver_data *)id->driver_data;
 	irq_mask = pcie->driver_data->irq_mask;
 
-	err = pci_enable_device(pdev);
-	if (err)
-		return err;
+	ret = pci_enable_device(pdev);
+	if (ret)
+		return ret;
 
-	err = pci_request_regions(pdev, KVASER_PCIEFD_DRV_NAME);
-	if (err)
+	ret = pci_request_regions(pdev, KVASER_PCIEFD_DRV_NAME);
+	if (ret)
 		goto err_disable_pci;
 
 	pcie->reg_base = pci_iomap(pdev, 0, 0);
 	if (!pcie->reg_base) {
-		err = -ENOMEM;
+		ret = -ENOMEM;
 		goto err_release_regions;
 	}
 
-	err = kvaser_pciefd_setup_board(pcie);
-	if (err)
+	ret = kvaser_pciefd_setup_board(pcie);
+	if (ret)
 		goto err_pci_iounmap;
 
-	err = kvaser_pciefd_setup_dma(pcie);
-	if (err)
+	ret = kvaser_pciefd_setup_dma(pcie);
+	if (ret)
 		goto err_pci_iounmap;
 
 	pci_set_master(pdev);
 
-	err = kvaser_pciefd_setup_can_ctrls(pcie);
-	if (err)
+	ret = kvaser_pciefd_setup_can_ctrls(pcie);
+	if (ret)
 		goto err_teardown_can_ctrls;
 
-	err = request_irq(pcie->pci->irq, kvaser_pciefd_irq_handler,
-			  IRQF_SHARED, KVASER_PCIEFD_DRV_NAME, pcie);
-	if (err)
+	ret = pci_alloc_irq_vectors(pcie->pci, 1, 1, PCI_IRQ_INTX | PCI_IRQ_MSI);
+	if (ret < 0) {
+		dev_err(&pcie->pci->dev, "Failed to allocate IRQ vectors.\n");
 		goto err_teardown_can_ctrls;
+	}
 
+	ret = pci_irq_vector(pcie->pci, 0);
+	if (ret < 0)
+		goto err_pci_free_irq_vectors;
+
+	pcie->pci->irq = ret;
+
+	ret = request_irq(pcie->pci->irq, kvaser_pciefd_irq_handler,
+			  IRQF_SHARED, KVASER_PCIEFD_DRV_NAME, pcie);
+	if (ret) {
+		dev_err(&pcie->pci->dev, "Failed to request IRQ %d\n", pcie->pci->irq);
+		goto err_pci_free_irq_vectors;
+	}
 	iowrite32(KVASER_PCIEFD_SRB_IRQ_DPD0 | KVASER_PCIEFD_SRB_IRQ_DPD1,
 		  KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG);
@@ -1737,8 +1813,8 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev,
 	iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1,
 		  KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
 
-	err = kvaser_pciefd_reg_candev(pcie);
-	if (err)
+	ret = kvaser_pciefd_reg_candev(pcie);
+	if (ret)
 		goto err_free_irq;
 
 	return 0;
@@ -1748,6 +1824,9 @@ err_free_irq:
 	iowrite32(0, irq_en_base);
 	free_irq(pcie->pci->irq, pcie);
 
+err_pci_free_irq_vectors:
+	pci_free_irq_vectors(pcie->pci);
+
 err_teardown_can_ctrls:
 	kvaser_pciefd_teardown_can_ctrls(pcie);
 	iowrite32(0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CTRL_REG);
@@ -1762,7 +1841,7 @@ err_release_regions:
 err_disable_pci:
 	pci_disable_device(pdev);
 
-	return err;
+	return ret;
 }
 
 static void kvaser_pciefd_remove_all_ctrls(struct kvaser_pciefd *pcie)
@@ -1793,7 +1872,7 @@ static void kvaser_pciefd_remove(struct pci_dev *pdev)
 
 	iowrite32(0, KVASER_PCIEFD_PCI_IEN_ADDR(pcie));
 	free_irq(pcie->pci->irq, pcie);
-
+	pci_free_irq_vectors(pcie->pci);
 	pci_iounmap(pdev, pcie->reg_base);
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);
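
A few patterns in the hunks above are worth calling out for readers skimming the diff. The first is the 64-bit DMA address handling: the #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT blocks are replaced with IS_ENABLED() plus lower_32_bits()/upper_32_bits(), so both branches are always compiled and type-checked, with the unused one dropped by the optimizer. A minimal sketch of that pattern, using an illustrative function name and register layout rather than the driver's exact code:

#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/types.h>

/* Sketch only: write a DMA buffer address as two 32-bit SerDes words. */
static void example_write_dma_map(void __iomem *serdes_base, dma_addr_t addr)
{
	u32 lsb = lower_32_bits(addr);
	u32 msb = 0;

	/* Both branches are compiled; the unused one is discarded by the optimizer. */
	if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
		msb = upper_32_bits(addr);

	iowrite32(lsb, serdes_base);
	iowrite32(msb, serdes_base + 0x4);
}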
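
The probe() hunks replace a bare request_irq() on the legacy interrupt line with pci_alloc_irq_vectors(), asking for a single vector with MSI preferred and INTx as fallback (PCI_IRQ_INTX is the current name of the old PCI_IRQ_LEGACY flag), then mapping vector 0 to a Linux IRQ number with pci_irq_vector(). A condensed sketch of that sequence, with hypothetical names and error labels:

#include <linux/interrupt.h>
#include <linux/pci.h>

static int example_setup_irq(struct pci_dev *pdev, irq_handler_t handler,
			     void *dev_id)
{
	int irq, ret;

	/* One vector: MSI if the device and platform support it, else INTx. */
	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_INTX | PCI_IRQ_MSI);
	if (ret < 0)
		return ret;

	/* Translate vector 0 into the Linux IRQ number passed to request_irq(). */
	irq = pci_irq_vector(pdev, 0);
	if (irq < 0) {
		ret = irq;
		goto err_free_vectors;
	}

	ret = request_irq(irq, handler, IRQF_SHARED, "example", dev_id);
	if (ret)
		goto err_free_vectors;

	return 0;

err_free_vectors:
	pci_free_irq_vectors(pdev);
	return ret;
}

The matching teardown order (free_irq() before pci_free_irq_vectors()) mirrors what the new err_pci_free_irq_vectors label and the kvaser_pciefd_remove() hunk do above.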
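
The error-frame hunks gate bus-error frames on CAN_CTRLMODE_BERR_REPORTING and consistently mirror the error counters into data[6]/data[7] together with CAN_ERR_CNT. A reduced sketch of that reporting pattern, assuming a candev-style private structure; this is not the driver's exact function:

#include <linux/can/dev.h>
#include <linux/can/error.h>
#include <linux/netdevice.h>

static int example_report_bus_error(struct net_device *ndev,
				    const struct can_berr_counter *bec)
{
	struct can_priv *priv = netdev_priv(ndev);
	struct can_frame *cf;
	struct sk_buff *skb;

	/* Only emit bus-error frames if the user enabled berr-reporting. */
	if (!(priv->ctrlmode & CAN_CTRLMODE_BERR_REPORTING))
		return 0;

	skb = alloc_can_err_skb(ndev, &cf);
	if (!skb) {
		ndev->stats.rx_dropped++;
		return -ENOMEM;
	}

	/* CAN_ERR_CNT signals that data[6]/data[7] carry the TX/RX counters. */
	cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_CNT;
	cf->data[6] = bec->txerr;
	cf->data[7] = bec->rxerr;
	netif_rx(skb);

	return 0;
}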