Diffstat (limited to 'drivers/net/ethernet/ibm')
 drivers/net/ethernet/ibm/ehea/ehea_main.c | 16
 drivers/net/ethernet/ibm/emac/core.c      | 10
 drivers/net/ethernet/ibm/ibmveth.c        | 71
 drivers/net/ethernet/ibm/ibmveth.h        |  1
 drivers/net/ethernet/ibm/ibmvnic.c        | 72
 drivers/net/ethernet/ibm/ibmvnic.h        |  3
 6 files changed, 121 insertions(+), 52 deletions(-)
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 54efa9a5167b..702446a93697 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -1981,14 +1981,6 @@ out:
 	ehea_update_bcmc_registrations();
 }
 
-static int ehea_change_mtu(struct net_device *dev, int new_mtu)
-{
-	if ((new_mtu < 68) || (new_mtu > EHEA_MAX_PACKET_SIZE))
-		return -EINVAL;
-	dev->mtu = new_mtu;
-	return 0;
-}
-
 static void xmit_common(struct sk_buff *skb, struct ehea_swqe *swqe)
 {
 	swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT | EHEA_SWQE_CRC;
@@ -2446,6 +2438,8 @@ static int ehea_open(struct net_device *dev)
 
 	netif_info(port, ifup, dev, "enabling port\n");
 
+	netif_carrier_off(dev);
+
 	ret = ehea_up(dev);
 	if (!ret) {
 		port_napi_enable(port);
@@ -2968,7 +2962,6 @@ static const struct net_device_ops ehea_netdev_ops = {
 	.ndo_set_mac_address	= ehea_set_mac_addr,
 	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_set_rx_mode	= ehea_set_multicast_list,
-	.ndo_change_mtu		= ehea_change_mtu,
 	.ndo_vlan_rx_add_vid	= ehea_vlan_rx_add_vid,
 	.ndo_vlan_rx_kill_vid	= ehea_vlan_rx_kill_vid,
 	.ndo_tx_timeout		= ehea_tx_watchdog,
@@ -3041,13 +3034,16 @@ static struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
 		      NETIF_F_IP_CSUM;
 	dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;
 
+	/* MTU range: 68 - 9022 */
+	dev->min_mtu = ETH_MIN_MTU;
+	dev->max_mtu = EHEA_MAX_PACKET_SIZE;
+
 	INIT_WORK(&port->reset_task, ehea_reset_port);
 	INIT_DELAYED_WORK(&port->stats_work, ehea_update_stats);
 
 	init_waitqueue_head(&port->swqe_avail_wq);
 	init_waitqueue_head(&port->restart_wq);
 
-	memset(&port->stats, 0, sizeof(struct net_device_stats));
 	ret = register_netdev(dev);
 	if (ret) {
 		pr_err("register_netdev failed. ret=%d\n", ret);
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 8f139197f1aa..52a69c925965 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -1099,9 +1099,6 @@ static int emac_change_mtu(struct net_device *ndev, int new_mtu)
 	struct emac_instance *dev = netdev_priv(ndev);
 	int ret = 0;
 
-	if (new_mtu < EMAC_MIN_MTU || new_mtu > dev->max_mtu)
-		return -EINVAL;
-
 	DBG(dev, "change_mtu(%d)" NL, new_mtu);
 
 	if (netif_running(ndev)) {
@@ -2564,7 +2561,7 @@ static int emac_init_config(struct emac_instance *dev)
 	if (emac_read_uint_prop(np, "cell-index", &dev->cell_index, 1))
 		return -ENXIO;
 	if (emac_read_uint_prop(np, "max-frame-size", &dev->max_mtu, 0))
-		dev->max_mtu = 1500;
+		dev->max_mtu = ETH_DATA_LEN;
 	if (emac_read_uint_prop(np, "rx-fifo-size", &dev->rx_fifo_size, 0))
 		dev->rx_fifo_size = 2048;
 	if (emac_read_uint_prop(np, "tx-fifo-size", &dev->tx_fifo_size, 0))
@@ -2718,7 +2715,6 @@ static const struct net_device_ops emac_netdev_ops = {
 	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_set_mac_address	= emac_set_mac_address,
 	.ndo_start_xmit		= emac_start_xmit,
-	.ndo_change_mtu		= eth_change_mtu,
 };
 
 static const struct net_device_ops emac_gige_netdev_ops = {
@@ -2891,6 +2887,10 @@ static int emac_probe(struct platform_device *ofdev)
 	ndev->netdev_ops = &emac_netdev_ops;
 	ndev->ethtool_ops = &emac_ethtool_ops;
 
+	/* MTU range: 46 - 1500 or whatever is in OF */
+	ndev->min_mtu = EMAC_MIN_MTU;
+	ndev->max_mtu = dev->max_mtu;
+
 	netif_carrier_off(ndev);
 
 	err = register_netdev(ndev);
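Both ehea and emac above drop their private MTU bounds checks in favor of the net core's min_mtu/max_mtu fields. For readers unfamiliar with that mechanism, here is a minimal sketch (not taken from this patch; the function name is illustrative) of roughly how the core-side validation behaves once a driver advertises its range at probe time:

	/* Sketch only: approximates the core MTU validation in dev_set_mtu()
	 * that replaces per-driver range checks; not the exact kernel code. */
	static int dev_set_mtu_sketch(struct net_device *dev, int new_mtu)
	{
		/* reject values outside the range the driver advertised */
		if (new_mtu < dev->min_mtu || new_mtu > dev->max_mtu)
			return -EINVAL;

		/* a driver callback, if present, now only does hardware work */
		if (dev->netdev_ops->ndo_change_mtu)
			return dev->netdev_ops->ndo_change_mtu(dev, new_mtu);

		dev->mtu = new_mtu;
		return 0;
	}

With the range fields set, a driver like ehea whose callback did nothing but bounds-check needs no ndo_change_mtu at all, which is why ehea_change_mtu can be deleted outright.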
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index ebe60719e489..fbece63395a8 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -58,7 +58,7 @@ static struct kobj_type ktype_veth_pool;
 static const char ibmveth_driver_name[] = "ibmveth";
 static const char ibmveth_driver_string[] = "IBM Power Virtual Ethernet Driver";
 
-#define ibmveth_driver_version "1.05"
+#define ibmveth_driver_version "1.06"
 
 MODULE_AUTHOR("Santiago Leon <santil@linux.vnet.ibm.com>");
 MODULE_DESCRIPTION("IBM Power Virtual Ethernet Driver");
@@ -137,6 +137,11 @@ static inline int ibmveth_rxq_frame_offset(struct ibmveth_adapter *adapter)
 	return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_OFF_MASK;
 }
 
+static inline int ibmveth_rxq_large_packet(struct ibmveth_adapter *adapter)
+{
+	return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_LRG_PKT;
+}
+
 static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter)
 {
 	return be32_to_cpu(adapter->rx_queue.queue_addr[adapter->rx_queue.index].length);
@@ -1174,6 +1179,45 @@ map_failed:
 	goto retry_bounce;
 }
 
+static void ibmveth_rx_mss_helper(struct sk_buff *skb, u16 mss, int lrg_pkt)
+{
+	int offset = 0;
+
+	/* only TCP packets will be aggregated */
+	if (skb->protocol == htons(ETH_P_IP)) {
+		struct iphdr *iph = (struct iphdr *)skb->data;
+
+		if (iph->protocol == IPPROTO_TCP) {
+			offset = iph->ihl * 4;
+			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+		} else {
+			return;
+		}
+	} else if (skb->protocol == htons(ETH_P_IPV6)) {
+		struct ipv6hdr *iph6 = (struct ipv6hdr *)skb->data;
+
+		if (iph6->nexthdr == IPPROTO_TCP) {
+			offset = sizeof(struct ipv6hdr);
+			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+		} else {
+			return;
+		}
+	} else {
+		return;
+	}
+	/* if mss is not set through Large Packet bit/mss in rx buffer,
+	 * expect that the mss will be written to the tcp header checksum.
+	 */
+	if (lrg_pkt) {
+		skb_shinfo(skb)->gso_size = mss;
+	} else if (offset) {
+		struct tcphdr *tcph = (struct tcphdr *)(skb->data + offset);
+
+		skb_shinfo(skb)->gso_size = ntohs(tcph->check);
+		tcph->check = 0;
+	}
+}
+
 static int ibmveth_poll(struct napi_struct *napi, int budget)
 {
 	struct ibmveth_adapter *adapter =
@@ -1182,6 +1226,7 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
 	int frames_processed = 0;
 	unsigned long lpar_rc;
 	struct iphdr *iph;
+	u16 mss = 0;
 
 restart_poll:
 	while (frames_processed < budget) {
@@ -1199,9 +1244,21 @@ restart_poll:
 			int length = ibmveth_rxq_frame_length(adapter);
 			int offset = ibmveth_rxq_frame_offset(adapter);
 			int csum_good = ibmveth_rxq_csum_good(adapter);
+			int lrg_pkt = ibmveth_rxq_large_packet(adapter);
 
 			skb = ibmveth_rxq_get_buffer(adapter);
 
+			/* if the large packet bit is set in the rx queue
+			 * descriptor, the mss will be written by PHYP eight
+			 * bytes from the start of the rx buffer, which is
+			 * skb->data at this stage
+			 */
+			if (lrg_pkt) {
+				__be64 *rxmss = (__be64 *)(skb->data + 8);
+
+				mss = (u16)be64_to_cpu(*rxmss);
+			}
+
 			new_skb = NULL;
 			if (length < rx_copybreak)
 				new_skb = netdev_alloc_skb(netdev, length);
@@ -1235,11 +1292,15 @@ restart_poll:
 					if (iph->check == 0xffff) {
 						iph->check = 0;
 						iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
-						adapter->rx_large_packets++;
 					}
 				}
 			}
 
+			if (length > netdev->mtu + ETH_HLEN) {
+				ibmveth_rx_mss_helper(skb, mss, lrg_pkt);
+				adapter->rx_large_packets++;
+			}
+
 			napi_gro_receive(napi, skb);	/* send it up */
 
 			netdev->stats.rx_packets++;
@@ -1349,9 +1410,6 @@ static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
 	int i, rc;
 	int need_restart = 0;
 
-	if (new_mtu < IBMVETH_MIN_MTU)
-		return -EINVAL;
-
 	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
 		if (new_mtu_oh <= adapter->rx_buff_pool[i].buff_size)
 			break;
@@ -1551,6 +1609,9 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
 		netdev->hw_features |= NETIF_F_TSO;
 	}
 
+	netdev->min_mtu = IBMVETH_MIN_MTU;
+	netdev->max_mtu = ETH_MAX_MTU;
+
 	memcpy(netdev->dev_addr, mac_addr_p, ETH_ALEN);
 
 	if (firmware_has_feature(FW_FEATURE_CMO))
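The receive-path change above recovers the sender's MSS for aggregated packets in one of two ways: newer firmware sets the large-packet bit and writes the MSS as a big-endian 64-bit value eight bytes into the rx buffer, while older firmware smuggles it into the TCP checksum field. A small self-contained illustration of the first case, with hypothetical values in plain user-space C (htobe64()/be64toh() from glibc's endian.h assumed), showing why the cast to u16 keeps the low-order 16 bits of the 64-bit field:

	#include <endian.h>   /* htobe64/be64toh; glibc-specific */
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		unsigned char rx_buf[16] = {0};
		uint64_t raw;
		uint16_t mss;

		/* pretend the hypervisor wrote MSS 1448 as a big-endian
		 * u64 starting eight bytes into the buffer */
		uint64_t wire = htobe64(1448);
		memcpy(rx_buf + 8, &wire, sizeof(wire));

		/* the driver's extraction: read the be64, keep low 16 bits */
		memcpy(&raw, rx_buf + 8, sizeof(raw));
		mss = (uint16_t)be64toh(raw);

		printf("mss = %u\n", mss);   /* prints: mss = 1448 */
		return 0;
	}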
diff --git a/drivers/net/ethernet/ibm/ibmveth.h b/drivers/net/ethernet/ibm/ibmveth.h
index 4eade67fe30c..7acda04d034e 100644
--- a/drivers/net/ethernet/ibm/ibmveth.h
+++ b/drivers/net/ethernet/ibm/ibmveth.h
@@ -209,6 +209,7 @@ struct ibmveth_rx_q_entry {
 #define IBMVETH_RXQ_TOGGLE		0x80000000
 #define IBMVETH_RXQ_TOGGLE_SHIFT	31
 #define IBMVETH_RXQ_VALID		0x40000000
+#define IBMVETH_RXQ_LRG_PKT		0x04000000
 #define IBMVETH_RXQ_NO_CSUM		0x02000000
 #define IBMVETH_RXQ_CSUM_GOOD		0x01000000
 #define IBMVETH_RXQ_OFF_MASK		0x0000FFFF
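The new IBMVETH_RXQ_LRG_PKT bit sits alongside the existing flag bits in the upper bits of the descriptor's flags/offset word, with the frame offset in the low 16 bits (IBMVETH_RXQ_OFF_MASK). A standalone illustration of how such a word decomposes, using hypothetical values on a host-order copy of the word:

	#include <stdint.h>
	#include <stdio.h>

	#define IBMVETH_RXQ_LRG_PKT	0x04000000
	#define IBMVETH_RXQ_CSUM_GOOD	0x01000000
	#define IBMVETH_RXQ_OFF_MASK	0x0000FFFF

	int main(void)
	{
		/* hypothetical descriptor: large packet, good checksum,
		 * frame offset 14 */
		uint32_t flags_off = IBMVETH_RXQ_LRG_PKT |
				     IBMVETH_RXQ_CSUM_GOOD | 14;

		printf("large packet: %d\n",
		       !!(flags_off & IBMVETH_RXQ_LRG_PKT));
		printf("frame offset: %u\n",
		       flags_off & IBMVETH_RXQ_OFF_MASK);
		return 0;
	}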
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index bfe17d9c022d..c12596676bbb 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -74,7 +74,6 @@
 #include <asm/iommu.h>
 #include <linux/uaccess.h>
 #include <asm/firmware.h>
-#include <linux/seq_file.h>
 #include <linux/workqueue.h>
 
 #include "ibmvnic.h"
@@ -902,17 +901,6 @@ static int ibmvnic_set_mac(struct net_device *netdev, void *p)
 	return 0;
 }
 
-static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu)
-{
-	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
-
-	if (new_mtu > adapter->req_mtu || new_mtu < adapter->min_mtu)
-		return -EINVAL;
-
-	netdev->mtu = new_mtu;
-	return 0;
-}
-
 static void ibmvnic_tx_timeout(struct net_device *dev)
 {
 	struct ibmvnic_adapter *adapter = netdev_priv(dev);
@@ -1029,7 +1017,6 @@ static const struct net_device_ops ibmvnic_netdev_ops = {
 	.ndo_set_rx_mode	= ibmvnic_set_multi,
 	.ndo_set_mac_address	= ibmvnic_set_mac,
 	.ndo_validate_addr	= eth_validate_addr,
-	.ndo_change_mtu		= ibmvnic_change_mtu,
 	.ndo_tx_timeout		= ibmvnic_tx_timeout,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= ibmvnic_netpoll_controller,
@@ -1190,7 +1177,7 @@ static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
 	if (!scrq)
 		return NULL;
 
-	scrq->msgs = (union sub_crq *)__get_free_pages(GFP_KERNEL, 2);
+	scrq->msgs = (union sub_crq *)__get_free_pages(GFP_ATOMIC, 2);
 	memset(scrq->msgs, 0, 4 * PAGE_SIZE);
 	if (!scrq->msgs) {
 		dev_warn(dev, "Couldn't allocate crq queue messages page\n");
@@ -1461,14 +1448,16 @@ static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
 	return rc;
 
 req_rx_irq_failed:
-	for (j = 0; j < i; j++)
+	for (j = 0; j < i; j++) {
 		free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
 		irq_dispose_mapping(adapter->rx_scrq[j]->irq);
+	}
 	i = adapter->req_tx_queues;
 req_tx_irq_failed:
-	for (j = 0; j < i; j++)
+	for (j = 0; j < i; j++) {
 		free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
 		irq_dispose_mapping(adapter->rx_scrq[j]->irq);
+	}
 	release_sub_crqs_no_irqs(adapter);
 	return rc;
 }
@@ -1503,9 +1492,8 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
 	    adapter->max_rx_add_entries_per_subcrq > entries_page ?
 	    entries_page : adapter->max_rx_add_entries_per_subcrq;
 
-	/* Choosing the maximum number of queues supported by firmware*/
-	adapter->req_tx_queues = adapter->max_tx_queues;
-	adapter->req_rx_queues = adapter->max_rx_queues;
+	adapter->req_tx_queues = adapter->opt_tx_comp_sub_queues;
+	adapter->req_rx_queues = adapter->opt_rx_comp_queues;
 
 	adapter->req_rx_add_queues = adapter->max_rx_add_queues;
 	adapter->req_mtu = adapter->max_mtu;
@@ -2638,10 +2626,12 @@ static void handle_query_cap_rsp(union ibmvnic_crq *crq,
 		break;
 	case MIN_MTU:
 		adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
+		netdev->min_mtu = adapter->min_mtu;
 		netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
 		break;
 	case MAX_MTU:
 		adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
+		netdev->max_mtu = adapter->max_mtu;
 		netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
 		break;
 	case MAX_MULTICAST_FILTERS:
@@ -3232,6 +3222,27 @@ static void ibmvnic_free_inflight(struct ibmvnic_adapter *adapter)
 	spin_unlock_irqrestore(&adapter->inflight_lock, flags);
 }
 
+static void ibmvnic_xport_event(struct work_struct *work)
+{
+	struct ibmvnic_adapter *adapter = container_of(work,
+						       struct ibmvnic_adapter,
+						       ibmvnic_xport);
+	struct device *dev = &adapter->vdev->dev;
+	long rc;
+
+	ibmvnic_free_inflight(adapter);
+	release_sub_crqs(adapter);
+	if (adapter->migrated) {
+		rc = ibmvnic_reenable_crq_queue(adapter);
+		if (rc)
+			dev_err(dev, "Error after enable rc=%ld\n", rc);
+		adapter->migrated = false;
+		rc = ibmvnic_send_crq_init(adapter);
+		if (rc)
+			dev_err(dev, "Error sending init rc=%ld\n", rc);
+	}
+}
+
 static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
 			       struct ibmvnic_adapter *adapter)
 {
@@ -3267,15 +3278,7 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
 		if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
 			dev_info(dev, "Re-enabling adapter\n");
 			adapter->migrated = true;
-			ibmvnic_free_inflight(adapter);
-			release_sub_crqs(adapter);
-			rc = ibmvnic_reenable_crq_queue(adapter);
-			if (rc)
-				dev_err(dev, "Error after enable rc=%ld\n", rc);
-			adapter->migrated = false;
-			rc = ibmvnic_send_crq_init(adapter);
-			if (rc)
-				dev_err(dev, "Error sending init rc=%ld\n", rc);
+			schedule_work(&adapter->ibmvnic_xport);
 		} else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
 			dev_info(dev, "Backing device failover detected\n");
 			netif_carrier_off(netdev);
@@ -3284,8 +3287,7 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
 			/* The adapter lost the connection */
 			dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
 				gen_crq->cmd);
-			ibmvnic_free_inflight(adapter);
-			release_sub_crqs(adapter);
+			schedule_work(&adapter->ibmvnic_xport);
 		}
 		return;
 	case IBMVNIC_CRQ_CMD_RSP:
@@ -3654,6 +3656,9 @@ static void handle_crq_init_rsp(struct work_struct *work)
 		goto task_failed;
 
 	netdev->real_num_tx_queues = adapter->req_tx_queues;
+	netdev->mtu = adapter->req_mtu;
+	netdev->min_mtu = adapter->min_mtu;
+	netdev->max_mtu = adapter->max_mtu;
 
 	if (adapter->failover) {
 		adapter->failover = false;
@@ -3691,7 +3696,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
 	struct net_device *netdev;
 	unsigned char *mac_addr_p;
 	struct dentry *ent;
-	char buf[16]; /* debugfs name buf */
+	char buf[17]; /* debugfs name buf */
 	int rc;
 
 	dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
@@ -3725,6 +3730,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
 	SET_NETDEV_DEV(netdev, &dev->dev);
 
 	INIT_WORK(&adapter->vnic_crq_init, handle_crq_init_rsp);
+	INIT_WORK(&adapter->ibmvnic_xport, ibmvnic_xport_event);
 
 	spin_lock_init(&adapter->stats_lock);
 
@@ -3792,6 +3798,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
 	}
 
 	netdev->real_num_tx_queues = adapter->req_tx_queues;
+	netdev->mtu = adapter->req_mtu;
 
 	rc = register_netdev(netdev);
 	if (rc) {
@@ -3828,6 +3835,9 @@ static int ibmvnic_remove(struct vio_dev *dev)
 	if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir))
 		debugfs_remove_recursive(adapter->debugfs_dir);
 
+	dma_unmap_single(&dev->dev, adapter->stats_token,
+			 sizeof(struct ibmvnic_statistics), DMA_FROM_DEVICE);
+
 	if (adapter->ras_comps)
 		dma_free_coherent(&dev->dev,
 				  adapter->ras_comp_num *
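The ibmvnic change above moves CRQ transport-event cleanup out of ibmvnic_handle_crq() and into the new ibmvnic_xport work item, so that releasing sub-CRQs and re-registering with the hypervisor run in process context rather than in the CRQ interrupt path. A generic sketch of that deferral pattern (illustrative names, not this driver's code):

	#include <linux/interrupt.h>
	#include <linux/kernel.h>
	#include <linux/workqueue.h>

	/* sketch: heavy cleanup is deferred from the IRQ handler to a
	 * work item that runs later in process context */
	struct my_adapter {
		struct work_struct xport_work;
		/* ... device state ... */
	};

	static void my_xport_worker(struct work_struct *work)
	{
		struct my_adapter *adap =
			container_of(work, struct my_adapter, xport_work);

		/* safe to sleep, take mutexes, free IRQs, etc. here */
		(void)adap;
	}

	static irqreturn_t my_irq_handler(int irq, void *data)
	{
		struct my_adapter *adap = data;

		/* note the event, defer the expensive part */
		schedule_work(&adap->xport_work);
		return IRQ_HANDLED;
	}

	/* at probe time: INIT_WORK(&adap->xport_work, my_xport_worker); */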
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index bfc84c7d0e11..dd775d951b73 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -27,7 +27,7 @@
 /**************************************************************************/
 
 #define IBMVNIC_NAME		"ibmvnic"
-#define IBMVNIC_DRIVER_VERSION	"1.0"
+#define IBMVNIC_DRIVER_VERSION	"1.0.1"
 #define IBMVNIC_INVALID_MAP	-1
 #define IBMVNIC_STATS_TIMEOUT	1
 /* basic structures plus 100 2k buffers */
@@ -1048,5 +1048,6 @@ struct ibmvnic_adapter {
 	u8 map_id;
 
 	struct work_struct vnic_crq_init;
+	struct work_struct ibmvnic_xport;
 	bool failover;
 };