path: root/drivers/message/fusion/mptlan.c
Diffstat (limited to 'drivers/message/fusion/mptlan.c')
-rw-r--r--  drivers/message/fusion/mptlan.c  |  116
1 file changed, 61 insertions, 55 deletions
diff --git a/drivers/message/fusion/mptlan.c b/drivers/message/fusion/mptlan.c
index 55dd71bbdc2a..de2e7bcf4784 100644
--- a/drivers/message/fusion/mptlan.c
+++ b/drivers/message/fusion/mptlan.c
@@ -72,9 +72,6 @@ MODULE_VERSION(my_VERSION);
#define MPT_LAN_RECEIVE_POST_REQUEST_SIZE \
(sizeof(LANReceivePostRequest_t) - sizeof(SGE_MPI_UNION))
-#define MPT_LAN_TRANSACTION32_SIZE \
- (sizeof(SGETransaction32_t) - sizeof(u32))
-
/*
* Fusion MPT LAN private structures
*/
@@ -394,7 +391,8 @@ mpt_lan_open(struct net_device *dev)
"a moment.\n");
}
- priv->mpt_txfidx = kmalloc(priv->tx_max_out * sizeof(int), GFP_KERNEL);
+ priv->mpt_txfidx = kmalloc_array(priv->tx_max_out, sizeof(int),
+ GFP_KERNEL);
if (priv->mpt_txfidx == NULL)
goto out;
priv->mpt_txfidx_tail = -1;
@@ -408,8 +406,8 @@ mpt_lan_open(struct net_device *dev)
dlprintk((KERN_INFO MYNAM "@lo: Finished initializing SendCtl\n"));
- priv->mpt_rxfidx = kmalloc(priv->max_buckets_out * sizeof(int),
- GFP_KERNEL);
+ priv->mpt_rxfidx = kmalloc_array(priv->max_buckets_out, sizeof(int),
+ GFP_KERNEL);
if (priv->mpt_rxfidx == NULL)
goto out_SendCtl;
priv->mpt_rxfidx_tail = -1;
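Both allocations above move from an open-coded size multiplication to kmalloc_array(), which returns NULL when the count times size product would overflow instead of quietly handing back a short buffer. A minimal sketch of the pattern (nitems is an illustrative count, not a name from this driver):

        int *fidx;

        fidx = kmalloc_array(nitems, sizeof(*fidx), GFP_KERNEL);
        if (!fidx)
                return -ENOMEM;         /* NULL on allocation failure or on overflow */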
@@ -518,9 +516,9 @@ mpt_lan_close(struct net_device *dev)
if (priv->RcvCtl[i].skb != NULL) {
/**/ dlprintk((KERN_INFO MYNAM "/lan_close: bucket %05x "
/**/ "is still out\n", i));
- pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[i].dma,
- priv->RcvCtl[i].len,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&mpt_dev->pcidev->dev,
+ priv->RcvCtl[i].dma,
+ priv->RcvCtl[i].len, DMA_FROM_DEVICE);
dev_kfree_skb(priv->RcvCtl[i].skb);
}
}
@@ -530,9 +528,9 @@ mpt_lan_close(struct net_device *dev)
for (i = 0; i < priv->tx_max_out; i++) {
if (priv->SendCtl[i].skb != NULL) {
- pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[i].dma,
- priv->SendCtl[i].len,
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&mpt_dev->pcidev->dev,
+ priv->SendCtl[i].dma,
+ priv->SendCtl[i].len, DMA_TO_DEVICE);
dev_kfree_skb(priv->SendCtl[i].skb);
}
}
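The two mpt_lan_close() hunks above are part of the file-wide switch from the legacy pci_* DMA wrappers to the generic DMA API: the struct pci_dev argument becomes its embedded struct device, and the PCI_DMA_* direction flags become their DMA_* equivalents. The correspondence, sketched with a struct pci_dev *pdev:

        /* legacy wrapper (removed):                                */
        /*   pci_unmap_single(pdev, dma, len, PCI_DMA_FROMDEVICE);  */
        /* generic DMA API (added):                                 */
        dma_unmap_single(&pdev->dev, dma, len, DMA_FROM_DEVICE);

Every remaining pci_map/unmap/sync call in this diff follows the same translation.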
@@ -551,7 +549,7 @@ mpt_lan_close(struct net_device *dev)
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Tx timeout handler. */
static void
-mpt_lan_tx_timeout(struct net_device *dev)
+mpt_lan_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct mpt_lan_priv *priv = netdev_priv(dev);
MPT_ADAPTER *mpt_dev = priv->mpt_dev;
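mpt_lan_tx_timeout() picks up the txqueue argument that the ndo_tx_timeout callback gained in v5.6; it identifies the stalled TX queue, and this driver can simply ignore it, but the prototype has to match for the net_device_ops assignment to compile. Roughly (the ops struct shown is illustrative, not copied from the file):

        static void mpt_lan_tx_timeout(struct net_device *dev, unsigned int txqueue);

        static const struct net_device_ops mpt_lan_netdev_ops = {
                .ndo_tx_timeout = mpt_lan_tx_timeout,
        };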
@@ -584,8 +582,8 @@ mpt_lan_send_turbo(struct net_device *dev, u32 tmsg)
__func__, sent));
priv->SendCtl[ctx].skb = NULL;
- pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
- priv->SendCtl[ctx].len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&mpt_dev->pcidev->dev, priv->SendCtl[ctx].dma,
+ priv->SendCtl[ctx].len, DMA_TO_DEVICE);
dev_kfree_skb_irq(sent);
spin_lock_irqsave(&priv->txfidx_lock, flags);
@@ -650,8 +648,9 @@ mpt_lan_send_reply(struct net_device *dev, LANSendReply_t *pSendRep)
__func__, sent));
priv->SendCtl[ctx].skb = NULL;
- pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
- priv->SendCtl[ctx].len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&mpt_dev->pcidev->dev,
+ priv->SendCtl[ctx].dma,
+ priv->SendCtl[ctx].len, DMA_TO_DEVICE);
dev_kfree_skb_irq(sent);
priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx;
@@ -670,7 +669,7 @@ out:
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-static int
+static netdev_tx_t
mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
{
struct mpt_lan_priv *priv = netdev_priv(dev);
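Switching the return type to netdev_tx_t matches the ndo_start_xmit prototype; the values the core expects are NETDEV_TX_OK and NETDEV_TX_BUSY. A skeletal example of the contract (function name and body are illustrative):

        static netdev_tx_t example_start_xmit(struct sk_buff *skb, struct net_device *dev)
        {
                bool ring_full = false;         /* placeholder for a real ring-space check */

                if (ring_full) {
                        netif_stop_queue(dev);
                        return NETDEV_TX_BUSY;  /* core requeues and retries the skb */
                }

                /* hand the frame to the hardware here */
                return NETDEV_TX_OK;
        }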
@@ -722,8 +721,8 @@ mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
skb_reset_mac_header(skb);
skb_pull(skb, 12);
- dma = pci_map_single(mpt_dev->pcidev, skb->data, skb->len,
- PCI_DMA_TODEVICE);
+ dma = dma_map_single(&mpt_dev->pcidev->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
priv->SendCtl[ctx].skb = skb;
priv->SendCtl[ctx].dma = dma;
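The TX map is the same one-for-one wrapper replacement; no new error handling is added here. New code would normally follow the mapping with a dma_mapping_error() check, along these lines (the drop-on-failure handling is hypothetical, not something this driver does):

        dma = dma_map_single(&mpt_dev->pcidev->dev, skb->data, skb->len,
                             DMA_TO_DEVICE);
        if (dma_mapping_error(&mpt_dev->pcidev->dev, dma)) {
                dev_kfree_skb(skb);             /* drop the frame */
                return NETDEV_TX_OK;
        }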
@@ -744,7 +743,7 @@ mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
pTrans->ContextSize = sizeof(u32);
pTrans->DetailsLength = 2 * sizeof(u32);
pTrans->Flags = 0;
- pTrans->TransactionContext[0] = cpu_to_le32(ctx);
+ pTrans->TransactionContext = cpu_to_le32(ctx);
// dioprintk((KERN_INFO MYNAM ": %s/%s: BC = %08x, skb = %p, buff = %p\n",
// IOC_AND_NETDEV_NAMES_s_s(dev),
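TransactionContext changes from an array subscript to a plain assignment, which implies SGETransaction32_t was reworked from one-element arrays to a scalar context plus a flexible TransactionDetails[] member. Assumed shape after that conversion (field order is an assumption; check lsi/mpi.h):

        typedef struct _SGE_TRANSACTION32 {
                U8      Reserved;
                U8      ContextSize;
                U8      DetailsLength;
                U8      Flags;
                U32     TransactionContext;
                U32     TransactionDetails[];
        } SGETransaction32_t;

If that layout is right, the old MPT_LAN_TRANSACTION32_SIZE (the old 12-byte struct minus one u32) and the new plain sizeof(SGETransaction32_t) both come to 8 bytes, which is why the macro removed in the first hunk is no longer needed and the receive-bucket sizing later in the diff can use sizeof() directly.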
@@ -870,13 +869,17 @@ mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg)
return -ENOMEM;
}
- pci_dma_sync_single_for_cpu(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
- priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&mpt_dev->pcidev->dev,
+ priv->RcvCtl[ctx].dma,
+ priv->RcvCtl[ctx].len,
+ DMA_FROM_DEVICE);
skb_copy_from_linear_data(old_skb, skb_put(skb, len), len);
- pci_dma_sync_single_for_device(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
- priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_device(&mpt_dev->pcidev->dev,
+ priv->RcvCtl[ctx].dma,
+ priv->RcvCtl[ctx].len,
+ DMA_FROM_DEVICE);
goto out;
}
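The receive turbo path copies packet data out of a buffer that stays mapped for streaming DMA, so the CPU access has to be bracketed by ownership transfers, which is exactly what the sync pair above does. Schematically, with dev standing in for &mpt_dev->pcidev->dev and dma/len for the RcvCtl entry:

        dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);       /* CPU may read   */
        skb_copy_from_linear_data(old_skb, skb_put(skb, len), len);    /* copy payload   */
        dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);    /* device owns it */

The two receive_post_reply hunks below repeat the same bracket.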
@@ -884,8 +887,8 @@ mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg)
priv->RcvCtl[ctx].skb = NULL;
- pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
- priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&mpt_dev->pcidev->dev, priv->RcvCtl[ctx].dma,
+ priv->RcvCtl[ctx].len, DMA_FROM_DEVICE);
out:
spin_lock_irqsave(&priv->rxfidx_lock, flags);
@@ -929,8 +932,8 @@ mpt_lan_receive_post_free(struct net_device *dev,
// dlprintk((KERN_INFO MYNAM "@rpr[2] TC + 3\n"));
priv->RcvCtl[ctx].skb = NULL;
- pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
- priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&mpt_dev->pcidev->dev, priv->RcvCtl[ctx].dma,
+ priv->RcvCtl[ctx].len, DMA_FROM_DEVICE);
dev_kfree_skb_any(skb);
priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
@@ -1030,16 +1033,16 @@ mpt_lan_receive_post_reply(struct net_device *dev,
// IOC_AND_NETDEV_NAMES_s_s(dev),
// i, l));
- pci_dma_sync_single_for_cpu(mpt_dev->pcidev,
- priv->RcvCtl[ctx].dma,
- priv->RcvCtl[ctx].len,
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&mpt_dev->pcidev->dev,
+ priv->RcvCtl[ctx].dma,
+ priv->RcvCtl[ctx].len,
+ DMA_FROM_DEVICE);
skb_copy_from_linear_data(old_skb, skb_put(skb, l), l);
- pci_dma_sync_single_for_device(mpt_dev->pcidev,
- priv->RcvCtl[ctx].dma,
- priv->RcvCtl[ctx].len,
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_device(&mpt_dev->pcidev->dev,
+ priv->RcvCtl[ctx].dma,
+ priv->RcvCtl[ctx].len,
+ DMA_FROM_DEVICE);
priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
szrem -= l;
@@ -1058,17 +1061,17 @@ mpt_lan_receive_post_reply(struct net_device *dev,
return -ENOMEM;
}
- pci_dma_sync_single_for_cpu(mpt_dev->pcidev,
- priv->RcvCtl[ctx].dma,
- priv->RcvCtl[ctx].len,
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&mpt_dev->pcidev->dev,
+ priv->RcvCtl[ctx].dma,
+ priv->RcvCtl[ctx].len,
+ DMA_FROM_DEVICE);
skb_copy_from_linear_data(old_skb, skb_put(skb, len), len);
- pci_dma_sync_single_for_device(mpt_dev->pcidev,
- priv->RcvCtl[ctx].dma,
- priv->RcvCtl[ctx].len,
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_device(&mpt_dev->pcidev->dev,
+ priv->RcvCtl[ctx].dma,
+ priv->RcvCtl[ctx].len,
+ DMA_FROM_DEVICE);
spin_lock_irqsave(&priv->rxfidx_lock, flags);
priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
@@ -1079,8 +1082,8 @@ mpt_lan_receive_post_reply(struct net_device *dev,
priv->RcvCtl[ctx].skb = NULL;
- pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
- priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&mpt_dev->pcidev->dev, priv->RcvCtl[ctx].dma,
+ priv->RcvCtl[ctx].len, DMA_FROM_DEVICE);
priv->RcvCtl[ctx].dma = 0;
priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
@@ -1158,7 +1161,7 @@ mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv)
__func__, buckets, curr));
max = (mpt_dev->req_sz - MPT_LAN_RECEIVE_POST_REQUEST_SIZE) /
- (MPT_LAN_TRANSACTION32_SIZE + sizeof(SGESimple64_t));
+ (sizeof(SGETransaction32_t) + sizeof(SGESimple64_t));
while (buckets) {
mf = mpt_get_msg_frame(LanCtx, mpt_dev);
@@ -1201,10 +1204,10 @@ mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv)
skb = priv->RcvCtl[ctx].skb;
if (skb && (priv->RcvCtl[ctx].len != len)) {
- pci_unmap_single(mpt_dev->pcidev,
+ dma_unmap_single(&mpt_dev->pcidev->dev,
priv->RcvCtl[ctx].dma,
priv->RcvCtl[ctx].len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
dev_kfree_skb(priv->RcvCtl[ctx].skb);
skb = priv->RcvCtl[ctx].skb = NULL;
}
@@ -1220,8 +1223,9 @@ mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv)
break;
}
- dma = pci_map_single(mpt_dev->pcidev, skb->data,
- len, PCI_DMA_FROMDEVICE);
+ dma = dma_map_single(&mpt_dev->pcidev->dev,
+ skb->data, len,
+ DMA_FROM_DEVICE);
priv->RcvCtl[ctx].skb = skb;
priv->RcvCtl[ctx].dma = dma;
@@ -1233,7 +1237,7 @@ mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv)
pTrans->ContextSize = sizeof(u32);
pTrans->DetailsLength = 0;
pTrans->Flags = 0;
- pTrans->TransactionContext[0] = cpu_to_le32(ctx);
+ pTrans->TransactionContext = cpu_to_le32(ctx);
pSimple = (SGESimple64_t *) pTrans->TransactionDetails;
@@ -1352,7 +1356,7 @@ mpt_register_lan_device (MPT_ADAPTER *mpt_dev, int pnum)
HWaddr[5] = a[0];
dev->addr_len = FC_ALEN;
- memcpy(dev->dev_addr, HWaddr, FC_ALEN);
+ dev_addr_set(dev, HWaddr);
memset(dev->broadcast, 0xff, FC_ALEN);
/* The Tx queue is 127 deep on the 909.
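The memcpy() into dev->dev_addr becomes dev_addr_set() because the address field is const in recent kernels and has to be updated through the helper so the core can track address changes. dev_addr_set() copies dev->addr_len bytes, so the FC_ALEN assignment just above must keep preceding it. Sketch:

        u8 hwaddr[FC_ALEN];             /* filled from the FC world-wide name, as above */

        dev->addr_len = FC_ALEN;
        dev_addr_set(dev, hwaddr);      /* copies dev->addr_len bytes into the netdev */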
@@ -1379,7 +1383,7 @@ mpt_register_lan_device (MPT_ADAPTER *mpt_dev, int pnum)
}
static int
-mptlan_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+mptlan_probe(struct pci_dev *pdev)
{
MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
struct net_device *dev;
@@ -1429,7 +1433,9 @@ mptlan_remove(struct pci_dev *pdev)
{
MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
struct net_device *dev = ioc->netdev;
+ struct mpt_lan_priv *priv = netdev_priv(dev);
+ cancel_delayed_work_sync(&priv->post_buckets_task);
if(dev != NULL) {
unregister_netdev(dev);
free_netdev(dev);
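The added cancel_delayed_work_sync() makes the remove path wait for any pending post_buckets_task run before the net_device (and the priv area embedded in it) goes away; letting that work fire after free_netdev() would risk a use-after-free. The teardown ordering the hunk establishes, in outline:

        cancel_delayed_work_sync(&priv->post_buckets_task);    /* flush deferred bucket posting */
        unregister_netdev(dev);
        free_netdev(dev);                                       /* also frees netdev_priv(dev) */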