path: root/drivers/net/ethernet/freescale
author	Linus Torvalds <torvalds@linux-foundation.org>	2017-09-06 14:45:08 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-09-06 14:45:08 -0700
commit	aae3dbb4776e7916b6cd442d00159bea27a695c1 (patch)
tree	d074c5d783a81e7e2e084b1eba77f57459da7e37 /drivers/net/ethernet/freescale
parent	ec3604c7a5aae8953545b0d05495357009a960e5 (diff)
parent	66bed8465a808400eb14562510e26c8818082cb8 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:

 1) Support ipv6 checksum offload in sunvnet driver, from Shannon Nelson.
 2) Move to RB-tree instead of custom AVL code in inetpeer, from Eric Dumazet.
 3) Allow generic XDP to work on virtual devices, from John Fastabend.
 4) Add bpf device maps and XDP_REDIRECT, which can be used to build arbitrary switching frameworks using XDP. From John Fastabend.
 5) Remove UFO offloads from the tree, gave us little other than bugs.
 6) Remove the IPSEC flow cache, from Florian Westphal.
 7) Support ipv6 route offload in mlxsw driver.
 8) Support VF representors in bnxt_en, from Sathya Perla.
 9) Add support for forward error correction modes to ethtool, from Vidya Sagar Ravipati.
10) Add time filter for packet scheduler action dumping, from Jamal Hadi Salim.
11) Extend the zerocopy sendmsg() used by virtio and tap to regular sockets via MSG_ZEROCOPY. From Willem de Bruijn.
12) Significantly rework value tracking in the BPF verifier, from Edward Cree.
13) Add new jump instructions to eBPF, from Daniel Borkmann.
14) Rework rtnetlink plumbing so that operations can be run without taking the RTNL semaphore. From Florian Westphal.
15) Support XDP in tap driver, from Jason Wang.
16) Add 32-bit eBPF JIT for ARM, from Shubham Bansal.
17) Add Huawei hinic ethernet driver.
18) Allow to report MD5 keys in TCP inet_diag dumps, from Ivan Delalande.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1780 commits)
  i40e: point wb_desc at the nvm_wb_desc during i40e_read_nvm_aq
  i40e: avoid NVM acquire deadlock during NVM update
  drivers: net: xgene: Remove return statement from void function
  drivers: net: xgene: Configure tx/rx delay for ACPI
  drivers: net: xgene: Read tx/rx delay for ACPI
  rocker: fix kcalloc parameter order
  rds: Fix non-atomic operation on shared flag variable
  net: sched: don't use GFP_KERNEL under spin lock
  vhost_net: correctly check tx avail during rx busy polling
  net: mdio-mux: add mdio_mux parameter to mdio_mux_init()
  rxrpc: Make service connection lookup always check for retry
  net: stmmac: Delete dead code for MDIO registration
  gianfar: Fix Tx flow control deactivation
  cxgb4: Ignore MPS_TX_INT_CAUSE[Bubble] for T6
  cxgb4: Fix pause frame count in t4_get_port_stats
  cxgb4: fix memory leak
  tun: rename generic_xdp to skb_xdp
  tun: reserve extra headroom only when XDP is set
  net: dsa: bcm_sf2: Configure IMP port TC2QOS mapping
  net: dsa: bcm_sf2: Advertise number of egress queues
  ...
Diffstat (limited to 'drivers/net/ethernet/freescale')
-rw-r--r--  drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 95
-rw-r--r--  drivers/net/ethernet/freescale/dpaa/dpaa_eth.h | 2
-rw-r--r--  drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c | 3
-rw-r--r--  drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c | 118
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 13
-rw-r--r--  drivers/net/ethernet/freescale/fec_mpc52xx.c | 4
-rw-r--r--  drivers/net/ethernet/freescale/fman/Makefile | 2
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman.c | 114
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman.h | 77
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_dtsec.c | 118
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_keygen.c | 783
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_keygen.h | 46
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_port.c | 63
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_port.h | 7
-rw-r--r--  drivers/net/ethernet/freescale/fman/mac.c | 50
-rw-r--r--  drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/fsl_pq_mdio.c | 20
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/gianfar_ptp.c | 2
19 files changed, 1335 insertions, 186 deletions
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 757b873735a5..42258060f142 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -158,7 +158,7 @@ MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");
#define DPAA_RX_PRIV_DATA_SIZE (u16)(DPAA_TX_PRIV_DATA_SIZE + \
dpaa_rx_extra_headroom)
-#define DPAA_ETH_RX_QUEUES 128
+#define DPAA_ETH_PCD_RXQ_NUM 128
#define DPAA_ENQUEUE_RETRIES 100000
@@ -169,6 +169,7 @@ struct fm_port_fqs {
struct dpaa_fq *tx_errq;
struct dpaa_fq *rx_defq;
struct dpaa_fq *rx_errq;
+ struct dpaa_fq *rx_pcdq;
};
/* All the dpa bps in use at any moment */
@@ -235,7 +236,7 @@ static int dpaa_netdev_init(struct net_device *net_dev,
net_dev->max_mtu = dpaa_get_max_mtu();
net_dev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_LLTX);
+ NETIF_F_LLTX | NETIF_F_RXHASH);
net_dev->hw_features |= NETIF_F_SG | NETIF_F_HIGHDMA;
/* The kernel enables GSO automatically, if we declare NETIF_F_SG.
@@ -342,18 +343,19 @@ static void dpaa_get_stats64(struct net_device *net_dev,
}
}
-static int dpaa_setup_tc(struct net_device *net_dev, u32 handle,
- u32 chain_index, __be16 proto, struct tc_to_netdev *tc)
+static int dpaa_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
+ void *type_data)
{
struct dpaa_priv *priv = netdev_priv(net_dev);
+ struct tc_mqprio_qopt *mqprio = type_data;
u8 num_tc;
int i;
- if (tc->type != TC_SETUP_MQPRIO)
- return -EINVAL;
+ if (type != TC_SETUP_MQPRIO)
+ return -EOPNOTSUPP;
- tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
- num_tc = tc->mqprio->num_tc;
+ mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+ num_tc = mqprio->num_tc;
if (num_tc == priv->num_tc)
return 0;
@@ -398,8 +400,8 @@ static struct mac_device *dpaa_mac_dev_get(struct platform_device *pdev)
of_dev = of_find_device_by_node(mac_node);
if (!of_dev) {
- dev_err(dpaa_dev, "of_find_device_by_node(%s) failed\n",
- mac_node->full_name);
+ dev_err(dpaa_dev, "of_find_device_by_node(%pOF) failed\n",
+ mac_node);
of_node_put(mac_node);
return ERR_PTR(-EINVAL);
}
@@ -627,6 +629,7 @@ static inline void dpaa_assign_wq(struct dpaa_fq *fq, int idx)
fq->wq = 5;
break;
case FQ_TYPE_RX_DEFAULT:
+ case FQ_TYPE_RX_PCD:
fq->wq = 6;
break;
case FQ_TYPE_TX:
@@ -687,6 +690,7 @@ static int dpaa_alloc_all_fqs(struct device *dev, struct list_head *list,
struct fm_port_fqs *port_fqs)
{
struct dpaa_fq *dpaa_fq;
+ u32 fq_base, fq_base_aligned, i;
dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_RX_ERROR);
if (!dpaa_fq)
@@ -700,6 +704,26 @@ static int dpaa_alloc_all_fqs(struct device *dev, struct list_head *list,
port_fqs->rx_defq = &dpaa_fq[0];
+ /* the PCD FQIDs range needs to be aligned for correct operation */
+ if (qman_alloc_fqid_range(&fq_base, 2 * DPAA_ETH_PCD_RXQ_NUM))
+ goto fq_alloc_failed;
+
+ fq_base_aligned = ALIGN(fq_base, DPAA_ETH_PCD_RXQ_NUM);
+
+ for (i = fq_base; i < fq_base_aligned; i++)
+ qman_release_fqid(i);
+
+ for (i = fq_base_aligned + DPAA_ETH_PCD_RXQ_NUM;
+ i < (fq_base + 2 * DPAA_ETH_PCD_RXQ_NUM); i++)
+ qman_release_fqid(i);
+
+ dpaa_fq = dpaa_fq_alloc(dev, fq_base_aligned, DPAA_ETH_PCD_RXQ_NUM,
+ list, FQ_TYPE_RX_PCD);
+ if (!dpaa_fq)
+ goto fq_alloc_failed;
+
+ port_fqs->rx_pcdq = &dpaa_fq[0];
+
if (!dpaa_fq_alloc(dev, 0, DPAA_ETH_TXQ_NUM, list, FQ_TYPE_TX_CONF_MQ))
goto fq_alloc_failed;
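
Editor's note: the hunk above over-allocates and trims to obtain a naturally aligned FQID window, which the hardware needs for hash-based distribution. A minimal sketch of the same pattern, pulled out of the driver for clarity (the wrapper name is illustrative; qman_alloc_fqid_range(), qman_release_fqid() and ALIGN() are the helpers actually used above):

#include <linux/errno.h>
#include <linux/kernel.h>	/* ALIGN() */
#include <soc/fsl/qman.h>	/* qman_alloc_fqid_range(), qman_release_fqid() */

/* Reserve 'count' FQIDs whose base is a multiple of 'count': grab a window
 * of 2 * count, keep the aligned block inside it, hand the rest back.
 * E.g. for count = 128 and a grant starting at FQID 200, the aligned base
 * is 256; FQIDs 200..255 and 384..455 are released again.
 */
static int alloc_aligned_fqid_range(u32 count, u32 *aligned_base)
{
	u32 base, i;

	if (qman_alloc_fqid_range(&base, 2 * count))
		return -ENOMEM;

	*aligned_base = ALIGN(base, count);

	for (i = base; i < *aligned_base; i++)
		qman_release_fqid(i);			/* unaligned head */
	for (i = *aligned_base + count; i < base + 2 * count; i++)
		qman_release_fqid(i);			/* unused tail */

	return 0;
}
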
@@ -869,13 +893,14 @@ static void dpaa_fq_setup(struct dpaa_priv *priv,
const struct dpaa_fq_cbs *fq_cbs,
struct fman_port *tx_port)
{
- int egress_cnt = 0, conf_cnt = 0, num_portals = 0, cpu;
+ int egress_cnt = 0, conf_cnt = 0, num_portals = 0, portal_cnt = 0, cpu;
const cpumask_t *affine_cpus = qman_affine_cpus();
- u16 portals[NR_CPUS];
+ u16 channels[NR_CPUS];
struct dpaa_fq *fq;
for_each_cpu(cpu, affine_cpus)
- portals[num_portals++] = qman_affine_channel(cpu);
+ channels[num_portals++] = qman_affine_channel(cpu);
+
if (num_portals == 0)
dev_err(priv->net_dev->dev.parent,
"No Qman software (affine) channels found");
@@ -889,6 +914,12 @@ static void dpaa_fq_setup(struct dpaa_priv *priv,
case FQ_TYPE_RX_ERROR:
dpaa_setup_ingress(priv, fq, &fq_cbs->rx_errq);
break;
+ case FQ_TYPE_RX_PCD:
+ if (!num_portals)
+ continue;
+ dpaa_setup_ingress(priv, fq, &fq_cbs->rx_defq);
+ fq->channel = channels[portal_cnt++ % num_portals];
+ break;
case FQ_TYPE_TX:
dpaa_setup_egress(priv, fq, tx_port,
&fq_cbs->egress_ern);
@@ -1038,7 +1069,8 @@ static int dpaa_fq_init(struct dpaa_fq *dpaa_fq, bool td_enable)
/* Put all the ingress queues in our "ingress CGR". */
if (priv->use_ingress_cgr &&
(dpaa_fq->fq_type == FQ_TYPE_RX_DEFAULT ||
- dpaa_fq->fq_type == FQ_TYPE_RX_ERROR)) {
+ dpaa_fq->fq_type == FQ_TYPE_RX_ERROR ||
+ dpaa_fq->fq_type == FQ_TYPE_RX_PCD)) {
initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CGID);
initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_CGE);
initfq.fqd.cgid = (u8)priv->ingress_cgr.cgrid;
@@ -1169,7 +1201,7 @@ static int dpaa_eth_init_tx_port(struct fman_port *port, struct dpaa_fq *errq,
static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp **bps,
size_t count, struct dpaa_fq *errq,
- struct dpaa_fq *defq,
+ struct dpaa_fq *defq, struct dpaa_fq *pcdq,
struct dpaa_buffer_layout *buf_layout)
{
struct fman_buffer_prefix_content buf_prefix_content;
@@ -1189,6 +1221,10 @@ static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp **bps,
rx_p = &params.specific_params.rx_params;
rx_p->err_fqid = errq->fqid;
rx_p->dflt_fqid = defq->fqid;
+ if (pcdq) {
+ rx_p->pcd_base_fqid = pcdq->fqid;
+ rx_p->pcd_fqs_count = DPAA_ETH_PCD_RXQ_NUM;
+ }
count = min(ARRAY_SIZE(rx_p->ext_buf_pools.ext_buf_pool), count);
rx_p->ext_buf_pools.num_of_pools_used = (u8)count;
@@ -1233,7 +1269,8 @@ static int dpaa_eth_init_ports(struct mac_device *mac_dev,
return err;
err = dpaa_eth_init_rx_port(rxport, bps, count, port_fqs->rx_errq,
- port_fqs->rx_defq, &buf_layout[RX]);
+ port_fqs->rx_defq, port_fqs->rx_pcdq,
+ &buf_layout[RX]);
return err;
}
@@ -2200,12 +2237,13 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
dma_addr_t addr = qm_fd_addr(fd);
enum qm_fd_format fd_format;
struct net_device *net_dev;
- u32 fd_status;
+ u32 fd_status, hash_offset;
struct dpaa_bp *dpaa_bp;
struct dpaa_priv *priv;
unsigned int skb_len;
struct sk_buff *skb;
int *count_ptr;
+ void *vaddr;
fd_status = be32_to_cpu(fd->status);
fd_format = qm_fd_get_format(fd);
@@ -2251,7 +2289,8 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
dma_unmap_single(dpaa_bp->dev, addr, dpaa_bp->size, DMA_FROM_DEVICE);
/* prefetch the first 64 bytes of the frame or the SGT start */
- prefetch(phys_to_virt(addr) + qm_fd_get_offset(fd));
+ vaddr = phys_to_virt(addr);
+ prefetch(vaddr + qm_fd_get_offset(fd));
fd_format = qm_fd_get_format(fd);
/* The only FD types that we may receive are contig and S/G */
@@ -2272,6 +2311,18 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
skb->protocol = eth_type_trans(skb, net_dev);
+ if (net_dev->features & NETIF_F_RXHASH && priv->keygen_in_use &&
+ !fman_port_get_hash_result_offset(priv->mac_dev->port[RX],
+ &hash_offset)) {
+ enum pkt_hash_types type;
+
+ /* if L4 exists, it was used in the hash generation */
+ type = be32_to_cpu(fd->status) & FM_FD_STAT_L4CV ?
+ PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3;
+ skb_set_hash(skb, be32_to_cpu(*(u32 *)(vaddr + hash_offset)),
+ type);
+ }
+
skb_len = skb->len;
if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
@@ -2510,6 +2561,9 @@ static struct dpaa_bp *dpaa_bp_alloc(struct device *dev)
dpaa_bp->bpid = FSL_DPAA_BPID_INV;
dpaa_bp->percpu_count = devm_alloc_percpu(dev, *dpaa_bp->percpu_count);
+ if (!dpaa_bp->percpu_count)
+ return ERR_PTR(-ENOMEM);
+
dpaa_bp->config_count = FSL_DPAA_ETH_MAX_BUF_COUNT;
dpaa_bp->seed_cb = dpaa_bp_seed;
@@ -2737,6 +2791,9 @@ static int dpaa_eth_probe(struct platform_device *pdev)
if (err)
goto init_ports_failed;
+ /* Rx traffic distribution based on keygen hashing defaults to on */
+ priv->keygen_in_use = true;
+
priv->percpu_priv = devm_alloc_percpu(dev, *priv->percpu_priv);
if (!priv->percpu_priv) {
dev_err(dev, "devm_alloc_percpu() failed\n");
@@ -2829,7 +2886,7 @@ static int dpaa_remove(struct platform_device *pdev)
return err;
}
-static struct platform_device_id dpaa_devtype[] = {
+static const struct platform_device_id dpaa_devtype[] = {
{
.name = "dpaa-ethernet",
.driver_data = 0,
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
index 9941a7866ebe..bd9422082f83 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
@@ -52,6 +52,7 @@
enum dpaa_fq_type {
FQ_TYPE_RX_DEFAULT = 1, /* Rx Default FQs */
FQ_TYPE_RX_ERROR, /* Rx Error FQs */
+ FQ_TYPE_RX_PCD, /* Rx Parse Classify Distribute FQs */
FQ_TYPE_TX, /* "Real" Tx FQs */
FQ_TYPE_TX_CONFIRM, /* Tx default Conf FQ (actually an Rx FQ) */
FQ_TYPE_TX_CONF_MQ, /* Tx conf FQs (one for each Tx FQ) */
@@ -158,6 +159,7 @@ struct dpaa_priv {
struct list_head dpaa_fq_list;
u8 num_tc;
+ bool keygen_in_use;
u32 msg_enable; /* net_device message level */
struct {
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c
index ec75d1c6fa89..0d9b185e317f 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c
@@ -71,6 +71,9 @@ static ssize_t dpaa_eth_show_fqids(struct device *dev,
case FQ_TYPE_RX_ERROR:
str = "Rx error";
break;
+ case FQ_TYPE_RX_PCD:
+ str = "Rx PCD";
+ break;
case FQ_TYPE_TX_CONFIRM:
str = "Tx default confirmation";
break;
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
index aad825088357..faea674094b9 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
@@ -399,6 +399,122 @@ static void dpaa_get_strings(struct net_device *net_dev, u32 stringset,
memcpy(strings, dpaa_stats_global, size);
}
+static int dpaa_get_hash_opts(struct net_device *dev,
+ struct ethtool_rxnfc *cmd)
+{
+ struct dpaa_priv *priv = netdev_priv(dev);
+
+ cmd->data = 0;
+
+ switch (cmd->flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ case UDP_V4_FLOW:
+ case UDP_V6_FLOW:
+ if (priv->keygen_in_use)
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* Fall through */
+ case IPV4_FLOW:
+ case IPV6_FLOW:
+ case SCTP_V4_FLOW:
+ case SCTP_V6_FLOW:
+ case AH_ESP_V4_FLOW:
+ case AH_ESP_V6_FLOW:
+ case AH_V4_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V4_FLOW:
+ case ESP_V6_FLOW:
+ if (priv->keygen_in_use)
+ cmd->data |= RXH_IP_SRC | RXH_IP_DST;
+ break;
+ default:
+ cmd->data = 0;
+ break;
+ }
+
+ return 0;
+}
+
+static int dpaa_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
+ u32 *unused)
+{
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXFH:
+ ret = dpaa_get_hash_opts(dev, cmd);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static void dpaa_set_hash(struct net_device *net_dev, bool enable)
+{
+ struct mac_device *mac_dev;
+ struct fman_port *rxport;
+ struct dpaa_priv *priv;
+
+ priv = netdev_priv(net_dev);
+ mac_dev = priv->mac_dev;
+ rxport = mac_dev->port[0];
+
+ fman_port_use_kg_hash(rxport, enable);
+ priv->keygen_in_use = enable;
+}
+
+static int dpaa_set_hash_opts(struct net_device *dev,
+ struct ethtool_rxnfc *nfc)
+{
+ int ret = -EINVAL;
+
+ /* we support hashing on IPv4/v6 src/dest IP and L4 src/dest port */
+ if (nfc->data &
+ ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3))
+ return -EINVAL;
+
+ switch (nfc->flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ case UDP_V4_FLOW:
+ case UDP_V6_FLOW:
+ case IPV4_FLOW:
+ case IPV6_FLOW:
+ case SCTP_V4_FLOW:
+ case SCTP_V6_FLOW:
+ case AH_ESP_V4_FLOW:
+ case AH_ESP_V6_FLOW:
+ case AH_V4_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V4_FLOW:
+ case ESP_V6_FLOW:
+ dpaa_set_hash(dev, !!nfc->data);
+ ret = 0;
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static int dpaa_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXFH:
+ ret = dpaa_set_hash_opts(dev, cmd);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
const struct ethtool_ops dpaa_ethtool_ops = {
.get_drvinfo = dpaa_get_drvinfo,
.get_msglevel = dpaa_get_msglevel,
@@ -412,4 +528,6 @@ const struct ethtool_ops dpaa_ethtool_ops = {
.get_strings = dpaa_get_strings,
.get_link_ksettings = dpaa_get_link_ksettings,
.set_link_ksettings = dpaa_set_link_ksettings,
+ .get_rxnfc = dpaa_get_rxnfc,
+ .set_rxnfc = dpaa_set_rxnfc,
};
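
Editor's note: the new get_rxnfc/set_rxnfc hooks expose the KeyGen hashing toggle through the standard ethtool flow-hash interface; from the command line this is roughly `ethtool -N <iface> rx-flow-hash tcp4 sdfn` to enable and `ethtool -n <iface> rx-flow-hash tcp4` to query. A minimal user-space sketch of the same request via the ioctl path the driver services (the interface name "eth0" is just a placeholder):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	/* ETHTOOL_SRXFH with these RXH_* bits reaches dpaa_set_rxnfc() ->
	 * dpaa_set_hash_opts() above and turns KeyGen hashing on.
	 */
	struct ethtool_rxnfc nfc = {
		.cmd		= ETHTOOL_SRXFH,
		.flow_type	= TCP_V4_FLOW,
		.data		= RXH_IP_SRC | RXH_IP_DST |
				  RXH_L4_B_0_1 | RXH_L4_B_2_3,
	};
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* placeholder name */
	ifr.ifr_data = (char *)&nfc;

	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		perror("ETHTOOL_SRXFH");
	close(fd);
	return 0;
}
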
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index a6e323f15637..56f56d6ada9c 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -173,10 +173,12 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
#endif /* CONFIG_M5272 */
/* The FEC stores dest/src/type/vlan, data, and checksum for receive packets.
+ *
+ * 2048 byte skbufs are allocated. However, alignment requirements
+ * varies between FEC variants. Worst case is 64, so round down by 64.
*/
-#define PKT_MAXBUF_SIZE 1522
+#define PKT_MAXBUF_SIZE (round_down(2048 - 64, 64))
#define PKT_MINBUF_SIZE 64
-#define PKT_MAXBLR_SIZE 1536
/* FEC receive acceleration */
#define FEC_RACC_IPDIS (1 << 1)
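
Editor's note: for reference, the new receive buffer bound works out as follows (round_down() rounds to a multiple of its second argument), and the same value now programs the hardware buffer size in fec_enet_enable_ring() below:

/* PKT_MAXBUF_SIZE = round_down(2048 - 64, 64)
 *                 = round_down(1984, 64)
 *                 = 1984 bytes
 * i.e. a full 2048-byte skb minus the worst-case 64-byte alignment slack,
 * replacing the old fixed 1522/1536 limits.
 */
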
@@ -224,7 +226,6 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
#define COPYBREAK_DEFAULT 256
-#define TSO_HEADER_SIZE 128
/* Max number of allowed TCP segments for software TSO */
#define FEC_MAX_TSO_SEGS 100
#define FEC_MAX_SKB_DESCS (FEC_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
@@ -851,7 +852,7 @@ static void fec_enet_enable_ring(struct net_device *ndev)
for (i = 0; i < fep->num_rx_queues; i++) {
rxq = fep->rx_queue[i];
writel(rxq->bd.dma, fep->hwp + FEC_R_DES_START(i));
- writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE(i));
+ writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_R_BUFF_SIZE(i));
/* enable DMA1/2 */
if (i)
@@ -1904,8 +1905,10 @@ static int fec_enet_mii_probe(struct net_device *ndev)
phy_dev = of_phy_connect(ndev, fep->phy_node,
&fec_enet_adjust_link, 0,
fep->phy_interface);
- if (!phy_dev)
+ if (!phy_dev) {
+ netdev_err(ndev, "Unable to connect to phy\n");
return -ENODEV;
+ }
} else {
/* check for attached phy */
for (phy_id = 0; (phy_id < PHY_MAX_ADDR); phy_id++) {
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c
index aa8cf5d2a53c..6d7269d87a85 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c
@@ -960,8 +960,8 @@ static int mpc52xx_fec_probe(struct platform_device *op)
/* We're done ! */
platform_set_drvdata(op, ndev);
- netdev_info(ndev, "%s MAC %pM\n",
- op->dev.of_node->full_name, ndev->dev_addr);
+ netdev_info(ndev, "%pOF MAC %pM\n",
+ op->dev.of_node, ndev->dev_addr);
return 0;
diff --git a/drivers/net/ethernet/freescale/fman/Makefile b/drivers/net/ethernet/freescale/fman/Makefile
index 60491779e49f..2c38119b172c 100644
--- a/drivers/net/ethernet/freescale/fman/Makefile
+++ b/drivers/net/ethernet/freescale/fman/Makefile
@@ -4,6 +4,6 @@ obj-$(CONFIG_FSL_FMAN) += fsl_fman.o
obj-$(CONFIG_FSL_FMAN) += fsl_fman_port.o
obj-$(CONFIG_FSL_FMAN) += fsl_mac.o
-fsl_fman-objs := fman_muram.o fman.o fman_sp.o
+fsl_fman-objs := fman_muram.o fman.o fman_sp.o fman_keygen.o
fsl_fman_port-objs := fman_port.o
fsl_mac-objs:= mac.o fman_dtsec.o fman_memac.o fman_tgec.o
diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c
index 4aefe2438969..9530405030a7 100644
--- a/drivers/net/ethernet/freescale/fman/fman.c
+++ b/drivers/net/ethernet/freescale/fman/fman.c
@@ -32,9 +32,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include "fman.h"
-#include "fman_muram.h"
-
#include <linux/fsl/guts.h>
#include <linux/slab.h>
#include <linux/delay.h>
@@ -46,6 +43,10 @@
#include <linux/interrupt.h>
#include <linux/libfdt_env.h>
+#include "fman.h"
+#include "fman_muram.h"
+#include "fman_keygen.h"
+
/* General defines */
#define FMAN_LIODN_TBL 64 /* size of LIODN table */
#define MAX_NUM_OF_MACS 10
@@ -56,6 +57,7 @@
/* Modules registers offsets */
#define BMI_OFFSET 0x00080000
#define QMI_OFFSET 0x00080400
+#define KG_OFFSET 0x000C1000
#define DMA_OFFSET 0x000C2000
#define FPM_OFFSET 0x000C3000
#define IMEM_OFFSET 0x000C4000
@@ -564,80 +566,6 @@ struct fman_cfg {
u32 qmi_def_tnums_thresh;
};
-/* Structure that holds information received from device tree */
-struct fman_dts_params {
- void __iomem *base_addr; /* FMan virtual address */
- struct resource *res; /* FMan memory resource */
- u8 id; /* FMan ID */
-
- int err_irq; /* FMan Error IRQ */
-
- u16 clk_freq; /* FMan clock freq (In Mhz) */
-
- u32 qman_channel_base; /* QMan channels base */
- u32 num_of_qman_channels; /* Number of QMan channels */
-
- struct resource muram_res; /* MURAM resource */
-};
-
-/** fman_exceptions_cb
- * fman - Pointer to FMan
- * exception - The exception.
- *
- * Exceptions user callback routine, will be called upon an exception
- * passing the exception identification.
- *
- * Return: irq status
- */
-typedef irqreturn_t (fman_exceptions_cb)(struct fman *fman,
- enum fman_exceptions exception);
-
-/** fman_bus_error_cb
- * fman - Pointer to FMan
- * port_id - Port id
- * addr - Address that caused the error
- * tnum - Owner of error
- * liodn - Logical IO device number
- *
- * Bus error user callback routine, will be called upon bus error,
- * passing parameters describing the errors and the owner.
- *
- * Return: IRQ status
- */
-typedef irqreturn_t (fman_bus_error_cb)(struct fman *fman, u8 port_id,
- u64 addr, u8 tnum, u16 liodn);
-
-struct fman {
- struct device *dev;
- void __iomem *base_addr;
- struct fman_intr_src intr_mng[FMAN_EV_CNT];
-
- struct fman_fpm_regs __iomem *fpm_regs;
- struct fman_bmi_regs __iomem *bmi_regs;
- struct fman_qmi_regs __iomem *qmi_regs;
- struct fman_dma_regs __iomem *dma_regs;
- struct fman_hwp_regs __iomem *hwp_regs;
- fman_exceptions_cb *exception_cb;
- fman_bus_error_cb *bus_error_cb;
- /* Spinlock for FMan use */
- spinlock_t spinlock;
- struct fman_state_struct *state;
-
- struct fman_cfg *cfg;
- struct muram_info *muram;
- /* cam section in muram */
- unsigned long cam_offset;
- size_t cam_size;
- /* Fifo in MURAM */
- unsigned long fifo_offset;
- size_t fifo_size;
-
- u32 liodn_base[64];
- u32 liodn_offset[64];
-
- struct fman_dts_params dts_params;
-};
-
static irqreturn_t fman_exceptions(struct fman *fman,
enum fman_exceptions exception)
{
@@ -1811,6 +1739,7 @@ static int fman_config(struct fman *fman)
fman->qmi_regs = base_addr + QMI_OFFSET;
fman->dma_regs = base_addr + DMA_OFFSET;
fman->hwp_regs = base_addr + HWP_OFFSET;
+ fman->kg_regs = base_addr + KG_OFFSET;
fman->base_addr = base_addr;
spin_lock_init(&fman->spinlock);
@@ -1925,8 +1854,8 @@ static int fman_reset(struct fman *fman)
guts_regs = of_iomap(guts_node, 0);
if (!guts_regs) {
- dev_err(fman->dev, "%s: Couldn't map %s regs\n",
- __func__, guts_node->full_name);
+ dev_err(fman->dev, "%s: Couldn't map %pOF regs\n",
+ __func__, guts_node);
goto guts_regs;
}
#define FMAN1_ALL_MACS_MASK 0xFCC00000
@@ -2083,6 +2012,11 @@ static int fman_init(struct fman *fman)
/* Init HW Parser */
hwp_init(fman->hwp_regs);
+ /* Init KeyGen */
+ fman->keygen = keygen_init(fman->kg_regs);
+ if (!fman->keygen)
+ return -EINVAL;
+
err = enable(fman, cfg);
if (err != 0)
return err;
@@ -2434,15 +2368,21 @@ u32 fman_get_qman_channel_id(struct fman *fman, u32 port_id)
int i;
if (fman->state->rev_info.major >= 6) {
- u32 port_ids[] = {0x30, 0x31, 0x28, 0x29, 0x2a, 0x2b,
- 0x2c, 0x2d, 0x2, 0x3, 0x4, 0x5, 0x7, 0x7};
+ static const u32 port_ids[] = {
+ 0x30, 0x31, 0x28, 0x29, 0x2a, 0x2b,
+ 0x2c, 0x2d, 0x2, 0x3, 0x4, 0x5, 0x7, 0x7
+ };
+
for (i = 0; i < fman->state->num_of_qman_channels; i++) {
if (port_ids[i] == port_id)
break;
}
} else {
- u32 port_ids[] = {0x30, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x1,
- 0x2, 0x3, 0x4, 0x5, 0x7, 0x7};
+ static const u32 port_ids[] = {
+ 0x30, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x1,
+ 0x2, 0x3, 0x4, 0x5, 0x7, 0x7
+ };
+
for (i = 0; i < fman->state->num_of_qman_channels; i++) {
if (port_ids[i] == port_id)
break;
@@ -2780,8 +2720,8 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
err = of_property_read_u32(fm_node, "cell-index", &val);
if (err) {
- dev_err(&of_dev->dev, "%s: failed to read cell-index for %s\n",
- __func__, fm_node->full_name);
+ dev_err(&of_dev->dev, "%s: failed to read cell-index for %pOF\n",
+ __func__, fm_node);
goto fman_node_put;
}
fman->dts_params.id = (u8)val;
@@ -2834,8 +2774,8 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
err = of_property_read_u32_array(fm_node, "fsl,qman-channel-range",
&range[0], 2);
if (err) {
- dev_err(&of_dev->dev, "%s: failed to read fsl,qman-channel-range for %s\n",
- __func__, fm_node->full_name);
+ dev_err(&of_dev->dev, "%s: failed to read fsl,qman-channel-range for %pOF\n",
+ __func__, fm_node);
goto fman_node_put;
}
fman->dts_params.qman_channel_base = range[0];
diff --git a/drivers/net/ethernet/freescale/fman/fman.h b/drivers/net/ethernet/freescale/fman/fman.h
index f53e1473dbcc..bfa02e0014ae 100644
--- a/drivers/net/ethernet/freescale/fman/fman.h
+++ b/drivers/net/ethernet/freescale/fman/fman.h
@@ -34,6 +34,8 @@
#define __FM_H
#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/of_irq.h>
/* FM Frame descriptor macros */
/* Frame queue Context Override */
@@ -274,6 +276,81 @@ struct fman_intr_src {
void *src_handle;
};
+/** fman_exceptions_cb
+ * fman - Pointer to FMan
+ * exception - The exception.
+ *
+ * Exceptions user callback routine, will be called upon an exception
+ * passing the exception identification.
+ *
+ * Return: irq status
+ */
+typedef irqreturn_t (fman_exceptions_cb)(struct fman *fman,
+ enum fman_exceptions exception);
+/** fman_bus_error_cb
+ * fman - Pointer to FMan
+ * port_id - Port id
+ * addr - Address that caused the error
+ * tnum - Owner of error
+ * liodn - Logical IO device number
+ *
+ * Bus error user callback routine, will be called upon bus error,
+ * passing parameters describing the errors and the owner.
+ *
+ * Return: IRQ status
+ */
+typedef irqreturn_t (fman_bus_error_cb)(struct fman *fman, u8 port_id,
+ u64 addr, u8 tnum, u16 liodn);
+
+/* Structure that holds information received from device tree */
+struct fman_dts_params {
+ void __iomem *base_addr; /* FMan virtual address */
+ struct resource *res; /* FMan memory resource */
+ u8 id; /* FMan ID */
+
+ int err_irq; /* FMan Error IRQ */
+
+ u16 clk_freq; /* FMan clock freq (In Mhz) */
+
+ u32 qman_channel_base; /* QMan channels base */
+ u32 num_of_qman_channels; /* Number of QMan channels */
+
+ struct resource muram_res; /* MURAM resource */
+};
+
+struct fman {
+ struct device *dev;
+ void __iomem *base_addr;
+ struct fman_intr_src intr_mng[FMAN_EV_CNT];
+
+ struct fman_fpm_regs __iomem *fpm_regs;
+ struct fman_bmi_regs __iomem *bmi_regs;
+ struct fman_qmi_regs __iomem *qmi_regs;
+ struct fman_dma_regs __iomem *dma_regs;
+ struct fman_hwp_regs __iomem *hwp_regs;
+ struct fman_kg_regs __iomem *kg_regs;
+ fman_exceptions_cb *exception_cb;
+ fman_bus_error_cb *bus_error_cb;
+ /* Spinlock for FMan use */
+ spinlock_t spinlock;
+ struct fman_state_struct *state;
+
+ struct fman_cfg *cfg;
+ struct muram_info *muram;
+ struct fman_keygen *keygen;
+ /* cam section in muram */
+ unsigned long cam_offset;
+ size_t cam_size;
+ /* Fifo in MURAM */
+ unsigned long fifo_offset;
+ size_t fifo_size;
+
+ u32 liodn_base[64];
+ u32 liodn_offset[64];
+
+ struct fman_dts_params dts_params;
+};
+
/* Structure for port-FM communication during fman_port_init. */
struct fman_port_init_params {
u8 port_id; /* port Id */
diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
index 98bba10fc38c..ea43b4974149 100644
--- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c
+++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
@@ -123,7 +123,7 @@
#define DTSEC_ECNTRL_R100M 0x00000008
#define DTSEC_ECNTRL_QSGMIIM 0x00000001
-#define DTSEC_TCTRL_GTS 0x00000020
+#define TCTRL_GTS 0x00000020
#define RCTRL_PAL_MASK 0x001f0000
#define RCTRL_PAL_SHIFT 16
@@ -863,6 +863,52 @@ int dtsec_cfg_pad_and_crc(struct fman_mac *dtsec, bool new_val)
return 0;
}
+static void graceful_start(struct fman_mac *dtsec, enum comm_mode mode)
+{
+ struct dtsec_regs __iomem *regs = dtsec->regs;
+
+ if (mode & COMM_MODE_TX)
+ iowrite32be(ioread32be(&regs->tctrl) &
+ ~TCTRL_GTS, &regs->tctrl);
+ if (mode & COMM_MODE_RX)
+ iowrite32be(ioread32be(&regs->rctrl) &
+ ~RCTRL_GRS, &regs->rctrl);
+}
+
+static void graceful_stop(struct fman_mac *dtsec, enum comm_mode mode)
+{
+ struct dtsec_regs __iomem *regs = dtsec->regs;
+ u32 tmp;
+
+ /* Graceful stop - Assert the graceful Rx stop bit */
+ if (mode & COMM_MODE_RX) {
+ tmp = ioread32be(&regs->rctrl) | RCTRL_GRS;
+ iowrite32be(tmp, &regs->rctrl);
+
+ if (dtsec->fm_rev_info.major == 2) {
+ /* Workaround for dTSEC Errata A002 */
+ usleep_range(100, 200);
+ } else {
+ /* Workaround for dTSEC Errata A004839 */
+ usleep_range(10, 50);
+ }
+ }
+
+ /* Graceful stop - Assert the graceful Tx stop bit */
+ if (mode & COMM_MODE_TX) {
+ if (dtsec->fm_rev_info.major == 2) {
+ /* dTSEC Errata A004: Do not use TCTRL[GTS]=1 */
+ pr_debug("GTS not supported due to DTSEC_A004 Errata.\n");
+ } else {
+ tmp = ioread32be(&regs->tctrl) | TCTRL_GTS;
+ iowrite32be(tmp, &regs->tctrl);
+
+ /* Workaround for dTSEC Errata A0012, A0014 */
+ usleep_range(10, 50);
+ }
+ }
+}
+
int dtsec_enable(struct fman_mac *dtsec, enum comm_mode mode)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
@@ -880,13 +926,8 @@ int dtsec_enable(struct fman_mac *dtsec, enum comm_mode mode)
iowrite32be(tmp, &regs->maccfg1);
- /* Graceful start - clear the graceful receive stop bit */
- if (mode & COMM_MODE_TX)
- iowrite32be(ioread32be(&regs->tctrl) & ~DTSEC_TCTRL_GTS,
- &regs->tctrl);
- if (mode & COMM_MODE_RX)
- iowrite32be(ioread32be(&regs->rctrl) & ~RCTRL_GRS,
- &regs->rctrl);
+ /* Graceful start - clear the graceful Rx/Tx stop bit */
+ graceful_start(dtsec, mode);
return 0;
}
@@ -899,23 +940,8 @@ int dtsec_disable(struct fman_mac *dtsec, enum comm_mode mode)
if (!is_init_done(dtsec->dtsec_drv_param))
return -EINVAL;
- /* Gracefull stop - Assert the graceful transmit stop bit */
- if (mode & COMM_MODE_RX) {
- tmp = ioread32be(&regs->rctrl) | RCTRL_GRS;
- iowrite32be(tmp, &regs->rctrl);
-
- if (dtsec->fm_rev_info.major == 2)
- usleep_range(100, 200);
- else
- udelay(10);
- }
-
- if (mode & COMM_MODE_TX) {
- if (dtsec->fm_rev_info.major == 2)
- pr_debug("GTS not supported due to DTSEC_A004 errata.\n");
- else
- pr_debug("GTS not supported due to DTSEC_A0014 errata.\n");
- }
+ /* Graceful stop - Assert the graceful Rx/Tx stop bit */
+ graceful_stop(dtsec, mode);
tmp = ioread32be(&regs->maccfg1);
if (mode & COMM_MODE_RX)
@@ -933,11 +959,19 @@ int dtsec_set_tx_pause_frames(struct fman_mac *dtsec,
u16 pause_time, u16 __maybe_unused thresh_time)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
+ enum comm_mode mode = COMM_MODE_NONE;
u32 ptv = 0;
if (!is_init_done(dtsec->dtsec_drv_param))
return -EINVAL;
+ if ((ioread32be(&regs->rctrl) & RCTRL_GRS) == 0)
+ mode |= COMM_MODE_RX;
+ if ((ioread32be(&regs->tctrl) & TCTRL_GTS) == 0)
+ mode |= COMM_MODE_TX;
+
+ graceful_stop(dtsec, mode);
+
if (pause_time) {
/* FM_BAD_TX_TS_IN_B_2_B_ERRATA_DTSEC_A003 Errata workaround */
if (dtsec->fm_rev_info.major == 2 && pause_time <= 320) {
@@ -958,17 +992,27 @@ int dtsec_set_tx_pause_frames(struct fman_mac *dtsec,
iowrite32be(ioread32be(&regs->maccfg1) & ~MACCFG1_TX_FLOW,
&regs->maccfg1);
+ graceful_start(dtsec, mode);
+
return 0;
}
int dtsec_accept_rx_pause_frames(struct fman_mac *dtsec, bool en)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
+ enum comm_mode mode = COMM_MODE_NONE;
u32 tmp;
if (!is_init_done(dtsec->dtsec_drv_param))
return -EINVAL;
+ if ((ioread32be(&regs->rctrl) & RCTRL_GRS) == 0)
+ mode |= COMM_MODE_RX;
+ if ((ioread32be(&regs->tctrl) & TCTRL_GTS) == 0)
+ mode |= COMM_MODE_TX;
+
+ graceful_stop(dtsec, mode);
+
tmp = ioread32be(&regs->maccfg1);
if (en)
tmp |= MACCFG1_RX_FLOW;
@@ -976,20 +1020,34 @@ int dtsec_accept_rx_pause_frames(struct fman_mac *dtsec, bool en)
tmp &= ~MACCFG1_RX_FLOW;
iowrite32be(tmp, &regs->maccfg1);
+ graceful_start(dtsec, mode);
+
return 0;
}
int dtsec_modify_mac_address(struct fman_mac *dtsec, enet_addr_t *enet_addr)
{
+ struct dtsec_regs __iomem *regs = dtsec->regs;
+ enum comm_mode mode = COMM_MODE_NONE;
+
if (!is_init_done(dtsec->dtsec_drv_param))
return -EINVAL;
+ if ((ioread32be(&regs->rctrl) & RCTRL_GRS) == 0)
+ mode |= COMM_MODE_RX;
+ if ((ioread32be(&regs->tctrl) & TCTRL_GTS) == 0)
+ mode |= COMM_MODE_TX;
+
+ graceful_stop(dtsec, mode);
+
/* Initialize MAC Station Address registers (1 & 2)
* Station address have to be swapped (big endian to little endian
*/
dtsec->addr = ENET_ADDR_TO_UINT64(*enet_addr);
set_mac_address(dtsec->regs, (u8 *)(*enet_addr));
+ graceful_start(dtsec, mode);
+
return 0;
}
@@ -1162,11 +1220,19 @@ int dtsec_set_promiscuous(struct fman_mac *dtsec, bool new_val)
int dtsec_adjust_link(struct fman_mac *dtsec, u16 speed)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
+ enum comm_mode mode = COMM_MODE_NONE;
u32 tmp;
if (!is_init_done(dtsec->dtsec_drv_param))
return -EINVAL;
+ if ((ioread32be(&regs->rctrl) & RCTRL_GRS) == 0)
+ mode |= COMM_MODE_RX;
+ if ((ioread32be(&regs->tctrl) & TCTRL_GTS) == 0)
+ mode |= COMM_MODE_TX;
+
+ graceful_stop(dtsec, mode);
+
tmp = ioread32be(&regs->maccfg2);
/* Full Duplex */
@@ -1186,6 +1252,8 @@ int dtsec_adjust_link(struct fman_mac *dtsec, u16 speed)
tmp &= ~DTSEC_ECNTRL_R100M;
iowrite32be(tmp, &regs->ecntrl);
+ graceful_start(dtsec, mode);
+
return 0;
}
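
Editor's note: taken together, the dtsec hunks above apply one pattern everywhere a live MAC register is rewritten: note which directions are currently running, gracefully stop them, do the update, then restart exactly those directions. A condensed sketch of that pattern (the wrapper below does not exist in the driver; every symbol it uses is introduced elsewhere in this diff):

static void dtsec_update_quiesced(struct fman_mac *dtsec,
				  void (*update)(struct fman_mac *dtsec))
{
	struct dtsec_regs __iomem *regs = dtsec->regs;
	enum comm_mode mode = COMM_MODE_NONE;

	/* Only pause directions that are actually running */
	if ((ioread32be(&regs->rctrl) & RCTRL_GRS) == 0)
		mode |= COMM_MODE_RX;
	if ((ioread32be(&regs->tctrl) & TCTRL_GTS) == 0)
		mode |= COMM_MODE_TX;

	graceful_stop(dtsec, mode);	/* assert GRS/GTS, honour errata delays */
	update(dtsec);			/* e.g. set_mac_address(), pause setup */
	graceful_start(dtsec, mode);	/* clear GRS/GTS for those directions */
}
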
diff --git a/drivers/net/ethernet/freescale/fman/fman_keygen.c b/drivers/net/ethernet/freescale/fman/fman_keygen.c
new file mode 100644
index 000000000000..f54da3c684d0
--- /dev/null
+++ b/drivers/net/ethernet/freescale/fman/fman_keygen.c
@@ -0,0 +1,783 @@
+/*
+ * Copyright 2017 NXP
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of NXP nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NXP ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL NXP BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/slab.h>
+
+#include "fman_keygen.h"
+
+/* Maximum number of HW Ports */
+#define FMAN_MAX_NUM_OF_HW_PORTS 64
+
+/* Maximum number of KeyGen Schemes */
+#define FM_KG_MAX_NUM_OF_SCHEMES 32
+
+/* Number of generic KeyGen Generic Extract Command Registers */
+#define FM_KG_NUM_OF_GENERIC_REGS 8
+
+/* Dummy port ID */
+#define DUMMY_PORT_ID 0
+
+/* Select Scheme Value Register */
+#define KG_SCH_DEF_USE_KGSE_DV_0 2
+#define KG_SCH_DEF_USE_KGSE_DV_1 3
+
+/* Registers Shifting values */
+#define FM_KG_KGAR_NUM_SHIFT 16
+#define KG_SCH_DEF_L4_PORT_SHIFT 8
+#define KG_SCH_DEF_IP_ADDR_SHIFT 18
+#define KG_SCH_HASH_CONFIG_SHIFT_SHIFT 24
+
+/* KeyGen Registers bit field masks: */
+
+/* Enable bit field mask for KeyGen General Configuration Register */
+#define FM_KG_KGGCR_EN 0x80000000
+
+/* KeyGen Global Registers bit field masks */
+#define FM_KG_KGAR_GO 0x80000000
+#define FM_KG_KGAR_READ 0x40000000
+#define FM_KG_KGAR_WRITE 0x00000000
+#define FM_KG_KGAR_SEL_SCHEME_ENTRY 0x00000000
+#define FM_KG_KGAR_SCM_WSEL_UPDATE_CNT 0x00008000
+
+#define FM_KG_KGAR_ERR 0x20000000
+#define FM_KG_KGAR_SEL_CLS_PLAN_ENTRY 0x01000000
+#define FM_KG_KGAR_SEL_PORT_ENTRY 0x02000000
+#define FM_KG_KGAR_SEL_PORT_WSEL_SP 0x00008000
+#define FM_KG_KGAR_SEL_PORT_WSEL_CPP 0x00004000
+
+/* Error events exceptions */
+#define FM_EX_KG_DOUBLE_ECC 0x80000000
+#define FM_EX_KG_KEYSIZE_OVERFLOW 0x40000000
+
+/* Scheme Registers bit field masks */
+#define KG_SCH_MODE_EN 0x80000000
+#define KG_SCH_VSP_NO_KSP_EN 0x80000000
+#define KG_SCH_HASH_CONFIG_SYM 0x40000000
+
+/* Known Protocol field codes */
+#define KG_SCH_KN_PORT_ID 0x80000000
+#define KG_SCH_KN_MACDST 0x40000000
+#define KG_SCH_KN_MACSRC 0x20000000
+#define KG_SCH_KN_TCI1 0x10000000
+#define KG_SCH_KN_TCI2 0x08000000
+#define KG_SCH_KN_ETYPE 0x04000000
+#define KG_SCH_KN_PPPSID 0x02000000
+#define KG_SCH_KN_PPPID 0x01000000
+#define KG_SCH_KN_MPLS1 0x00800000
+#define KG_SCH_KN_MPLS2 0x00400000
+#define KG_SCH_KN_MPLS_LAST 0x00200000
+#define KG_SCH_KN_IPSRC1 0x00100000
+#define KG_SCH_KN_IPDST1 0x00080000
+#define KG_SCH_KN_PTYPE1 0x00040000
+#define KG_SCH_KN_IPTOS_TC1 0x00020000
+#define KG_SCH_KN_IPV6FL1 0x00010000
+#define KG_SCH_KN_IPSRC2 0x00008000
+#define KG_SCH_KN_IPDST2 0x00004000
+#define KG_SCH_KN_PTYPE2 0x00002000
+#define KG_SCH_KN_IPTOS_TC2 0x00001000
+#define KG_SCH_KN_IPV6FL2 0x00000800
+#define KG_SCH_KN_GREPTYPE 0x00000400
+#define KG_SCH_KN_IPSEC_SPI 0x00000200
+#define KG_SCH_KN_IPSEC_NH 0x00000100
+#define KG_SCH_KN_IPPID 0x00000080
+#define KG_SCH_KN_L4PSRC 0x00000004
+#define KG_SCH_KN_L4PDST 0x00000002
+#define KG_SCH_KN_TFLG 0x00000001
+
+/* NIA values */
+#define NIA_ENG_BMI 0x00500000
+#define NIA_BMI_AC_ENQ_FRAME 0x00000002
+#define ENQUEUE_KG_DFLT_NIA (NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME)
+
+/* Hard-coded configuration:
+ * These values are used as hard-coded values for KeyGen configuration
+ * and they replace user selections for this hard-coded version
+ */
+
+/* Hash distribution shift */
+#define DEFAULT_HASH_DIST_FQID_SHIFT 0
+
+/* Hash shift */
+#define DEFAULT_HASH_SHIFT 0
+
+/* Symmetric hash usage:
+ * Warning:
+ * - the value for symmetric hash usage must be in accordance with hash
+ * key defined below
+ * - according to tests performed, spreading is not working if symmetric
+ * hash is set on true
+ * So ultimately symmetric hash functionality should be always disabled:
+ */
+#define DEFAULT_SYMMETRIC_HASH false
+
+/* Hash Key extraction fields: */
+#define DEFAULT_HASH_KEY_EXTRACT_FIELDS \
+ (KG_SCH_KN_IPSRC1 | KG_SCH_KN_IPDST1 | \
+ KG_SCH_KN_L4PSRC | KG_SCH_KN_L4PDST)
+
+/* Default values to be used as hash key in case IPv4 or L4 (TCP, UDP)
+ * don't exist in the frame
+ */
+/* Default IPv4 address */
+#define DEFAULT_HASH_KEY_IPv4_ADDR 0x0A0A0A0A
+/* Default L4 port */
+#define DEFAULT_HASH_KEY_L4_PORT 0x0B0B0B0B
+
+/* KeyGen Memory Mapped Registers: */
+
+/* Scheme Configuration RAM Registers */
+struct fman_kg_scheme_regs {
+ u32 kgse_mode; /* 0x100: MODE */
+ u32 kgse_ekfc; /* 0x104: Extract Known Fields Command */
+ u32 kgse_ekdv; /* 0x108: Extract Known Default Value */
+ u32 kgse_bmch; /* 0x10C: Bit Mask Command High */
+ u32 kgse_bmcl; /* 0x110: Bit Mask Command Low */
+ u32 kgse_fqb; /* 0x114: Frame Queue Base */
+ u32 kgse_hc; /* 0x118: Hash Command */
+ u32 kgse_ppc; /* 0x11C: Policer Profile Command */
+ u32 kgse_gec[FM_KG_NUM_OF_GENERIC_REGS];
+ /* 0x120: Generic Extract Command */
+ u32 kgse_spc;
+ /* 0x140: KeyGen Scheme Entry Statistic Packet Counter */
+ u32 kgse_dv0; /* 0x144: KeyGen Scheme Entry Default Value 0 */
+ u32 kgse_dv1; /* 0x148: KeyGen Scheme Entry Default Value 1 */
+ u32 kgse_ccbs;
+ /* 0x14C: KeyGen Scheme Entry Coarse Classification Bit*/
+ u32 kgse_mv; /* 0x150: KeyGen Scheme Entry Match vector */
+ u32 kgse_om; /* 0x154: KeyGen Scheme Entry Operation Mode bits */
+ u32 kgse_vsp;
+ /* 0x158: KeyGen Scheme Entry Virtual Storage Profile */
+};
+
+/* Port Partition Configuration Registers */
+struct fman_kg_pe_regs {
+ u32 fmkg_pe_sp; /* 0x100: KeyGen Port entry Scheme Partition */
+ u32 fmkg_pe_cpp;
+ /* 0x104: KeyGen Port Entry Classification Plan Partition */
+};
+
+/* General Configuration and Status Registers
+ * Global Statistic Counters
+ * KeyGen Global Registers
+ */
+struct fman_kg_regs {
+ u32 fmkg_gcr; /* 0x000: KeyGen General Configuration Register */
+ u32 res004; /* 0x004: Reserved */
+ u32 res008; /* 0x008: Reserved */
+ u32 fmkg_eer; /* 0x00C: KeyGen Error Event Register */
+ u32 fmkg_eeer; /* 0x010: KeyGen Error Event Enable Register */
+ u32 res014; /* 0x014: Reserved */
+ u32 res018; /* 0x018: Reserved */
+ u32 fmkg_seer; /* 0x01C: KeyGen Scheme Error Event Register */
+ u32 fmkg_seeer; /* 0x020: KeyGen Scheme Error Event Enable Register */
+ u32 fmkg_gsr; /* 0x024: KeyGen Global Status Register */
+ u32 fmkg_tpc; /* 0x028: Total Packet Counter Register */
+ u32 fmkg_serc; /* 0x02C: Soft Error Capture Register */
+ u32 res030[4]; /* 0x030: Reserved */
+ u32 fmkg_fdor; /* 0x034: Frame Data Offset Register */
+ u32 fmkg_gdv0r; /* 0x038: Global Default Value Register 0 */
+ u32 fmkg_gdv1r; /* 0x03C: Global Default Value Register 1 */
+ u32 res04c[6]; /* 0x040: Reserved */
+ u32 fmkg_feer; /* 0x044: Force Error Event Register */
+ u32 res068[38]; /* 0x048: Reserved */
+ union {
+ u32 fmkg_indirect[63]; /* 0x100: Indirect Access Registers */
+ struct fman_kg_scheme_regs fmkg_sch; /* Scheme Registers */
+ struct fman_kg_pe_regs fmkg_pe; /* Port Partition Registers */
+ };
+ u32 fmkg_ar; /* 0x1FC: KeyGen Action Register */
+};
+
+/* KeyGen Scheme data */
+struct keygen_scheme {
+ bool used; /* Specifies if this scheme is used */
+ u8 hw_port_id;
+ /* Hardware port ID
+ * schemes sharing between multiple ports is not
+ * currently supported
+ * so we have only one port id bound to a scheme
+ */
+ u32 base_fqid;
+ /* Base FQID:
+ * Must be between 1 and 2^24-1
+ * If hash is used and an even distribution is
+ * expected according to hash_fqid_count,
+ * base_fqid must be aligned to hash_fqid_count
+ */
+ u32 hash_fqid_count;
+ /* FQ range for hash distribution:
+ * Must be a power of 2
+ * Represents the range of queues for spreading
+ */
+ bool use_hashing; /* Usage of Hashing and spreading over FQ */
+ bool symmetric_hash; /* Symmetric Hash option usage */
+ u8 hashShift;
+ /* Hash result right shift.
+ * Select the 24 bits out of the 64 hash result.
+ * 0 means using the 24 LSB's, otherwise
+ * use the 24 LSB's after shifting right
+ */
+ u32 match_vector; /* Match Vector */
+};
+
+/* KeyGen driver data */
+struct fman_keygen {
+ struct keygen_scheme schemes[FM_KG_MAX_NUM_OF_SCHEMES];
+ /* Array of schemes */
+ struct fman_kg_regs __iomem *keygen_regs; /* KeyGen registers */
+};
+
+/* keygen_write_ar_wait
+ *
+ * Write Action Register with specified value, wait for GO bit field to be
+ * idle and then read the error
+ *
+ * regs: KeyGen registers
+ * fmkg_ar: Action Register value
+ *
+ * Return: Zero for success or error code in case of failure
+ */
+static int keygen_write_ar_wait(struct fman_kg_regs __iomem *regs, u32 fmkg_ar)
+{
+ iowrite32be(fmkg_ar, &regs->fmkg_ar);
+
+ /* Wait for GO bit field to be idle */
+ while (fmkg_ar & FM_KG_KGAR_GO)
+ fmkg_ar = ioread32be(&regs->fmkg_ar);
+
+ if (fmkg_ar & FM_KG_KGAR_ERR)
+ return -EINVAL;
+
+ return 0;
+}
+
+/* build_ar_scheme
+ *
+ * Build Action Register value for scheme settings
+ *
+ * scheme_id: Scheme ID
+ * update_counter: update scheme counter
+ * write: true for action to write the scheme or false for read action
+ *
+ * Return: AR value
+ */
+static u32 build_ar_scheme(u8 scheme_id, bool update_counter, bool write)
+{
+ u32 rw = (u32)(write ? FM_KG_KGAR_WRITE : FM_KG_KGAR_READ);
+
+ return (u32)(FM_KG_KGAR_GO |
+ rw |
+ FM_KG_KGAR_SEL_SCHEME_ENTRY |
+ DUMMY_PORT_ID |
+ ((u32)scheme_id << FM_KG_KGAR_NUM_SHIFT) |
+ (update_counter ? FM_KG_KGAR_SCM_WSEL_UPDATE_CNT : 0));
+}
+
+/* build_ar_bind_scheme
+ *
+ * Build Action Register value for port binding to schemes
+ *
+ * hwport_id: HW Port ID
+ * write: true for action to write the bind or false for read action
+ *
+ * Return: AR value
+ */
+static u32 build_ar_bind_scheme(u8 hwport_id, bool write)
+{
+ u32 rw = write ? (u32)FM_KG_KGAR_WRITE : (u32)FM_KG_KGAR_READ;
+
+ return (u32)(FM_KG_KGAR_GO |
+ rw |
+ FM_KG_KGAR_SEL_PORT_ENTRY |
+ hwport_id |
+ FM_KG_KGAR_SEL_PORT_WSEL_SP);
+}
+
+/* keygen_write_sp
+ *
+ * Write Scheme Partition Register with specified value
+ *
+ * regs: KeyGen Registers
+ * sp: Scheme Partition register value
+ * add: true to add a scheme partition or false to clear
+ *
+ * Return: none
+ */
+static void keygen_write_sp(struct fman_kg_regs __iomem *regs, u32 sp, bool add)
+{
+ u32 tmp;
+
+ tmp = ioread32be(&regs->fmkg_pe.fmkg_pe_sp);
+
+ if (add)
+ tmp |= sp;
+ else
+ tmp &= ~sp;
+
+ iowrite32be(tmp, &regs->fmkg_pe.fmkg_pe_sp);
+}
+
+/* build_ar_bind_cls_plan
+ *
+ * Build Action Register value for Classification Plan
+ *
+ * hwport_id: HW Port ID
+ * write: true for action to write the CP or false for read action
+ *
+ * Return: AR value
+ */
+static u32 build_ar_bind_cls_plan(u8 hwport_id, bool write)
+{
+ u32 rw = write ? (u32)FM_KG_KGAR_WRITE : (u32)FM_KG_KGAR_READ;
+
+ return (u32)(FM_KG_KGAR_GO |
+ rw |
+ FM_KG_KGAR_SEL_PORT_ENTRY |
+ hwport_id |
+ FM_KG_KGAR_SEL_PORT_WSEL_CPP);
+}
+
+/* keygen_write_cpp
+ *
+ * Write Classification Plan Partition Register with specified value
+ *
+ * regs: KeyGen Registers
+ * cpp: CPP register value
+ *
+ * Return: none
+ */
+static void keygen_write_cpp(struct fman_kg_regs __iomem *regs, u32 cpp)
+{
+ iowrite32be(cpp, &regs->fmkg_pe.fmkg_pe_cpp);
+}
+
+/* keygen_write_scheme
+ *
+ * Write all Schemes Registers with specified values
+ *
+ * regs: KeyGen Registers
+ * scheme_id: Scheme ID
+ * scheme_regs: Scheme registers values desired to be written
+ * update_counter: update scheme counter
+ *
+ * Return: Zero for success or error code in case of failure
+ */
+static int keygen_write_scheme(struct fman_kg_regs __iomem *regs, u8 scheme_id,
+ struct fman_kg_scheme_regs *scheme_regs,
+ bool update_counter)
+{
+ u32 ar_reg;
+ int err, i;
+
+ /* Write indirect scheme registers */
+ iowrite32be(scheme_regs->kgse_mode, &regs->fmkg_sch.kgse_mode);
+ iowrite32be(scheme_regs->kgse_ekfc, &regs->fmkg_sch.kgse_ekfc);
+ iowrite32be(scheme_regs->kgse_ekdv, &regs->fmkg_sch.kgse_ekdv);
+ iowrite32be(scheme_regs->kgse_bmch, &regs->fmkg_sch.kgse_bmch);
+ iowrite32be(scheme_regs->kgse_bmcl, &regs->fmkg_sch.kgse_bmcl);
+ iowrite32be(scheme_regs->kgse_fqb, &regs->fmkg_sch.kgse_fqb);
+ iowrite32be(scheme_regs->kgse_hc, &regs->fmkg_sch.kgse_hc);
+ iowrite32be(scheme_regs->kgse_ppc, &regs->fmkg_sch.kgse_ppc);
+ iowrite32be(scheme_regs->kgse_spc, &regs->fmkg_sch.kgse_spc);
+ iowrite32be(scheme_regs->kgse_dv0, &regs->fmkg_sch.kgse_dv0);
+ iowrite32be(scheme_regs->kgse_dv1, &regs->fmkg_sch.kgse_dv1);
+ iowrite32be(scheme_regs->kgse_ccbs, &regs->fmkg_sch.kgse_ccbs);
+ iowrite32be(scheme_regs->kgse_mv, &regs->fmkg_sch.kgse_mv);
+ iowrite32be(scheme_regs->kgse_om, &regs->fmkg_sch.kgse_om);
+ iowrite32be(scheme_regs->kgse_vsp, &regs->fmkg_sch.kgse_vsp);
+
+ for (i = 0 ; i < FM_KG_NUM_OF_GENERIC_REGS ; i++)
+ iowrite32be(scheme_regs->kgse_gec[i],
+ &regs->fmkg_sch.kgse_gec[i]);
+
+ /* Write AR (Action register) */
+ ar_reg = build_ar_scheme(scheme_id, update_counter, true);
+ err = keygen_write_ar_wait(regs, ar_reg);
+ if (err != 0) {
+ pr_err("Writing Action Register failed\n");
+ return err;
+ }
+
+ return err;
+}
+
+/* get_free_scheme_id
+ *
+ * Find the first free scheme available to be used
+ *
+ * keygen: KeyGen handle
+ * scheme_id: pointer to scheme id
+ *
+ * Return: 0 on success, -EINVAL when there are no available free schemes
+ */
+static int get_free_scheme_id(struct fman_keygen *keygen, u8 *scheme_id)
+{
+ u8 i;
+
+ for (i = 0; i < FM_KG_MAX_NUM_OF_SCHEMES; i++)
+ if (!keygen->schemes[i].used) {
+ *scheme_id = i;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+/* get_scheme
+ *
+ * Provides the scheme for specified ID
+ *
+ * keygen: KeyGen handle
+ * scheme_id: Scheme ID
+ *
+ * Return: handle to required scheme
+ */
+static struct keygen_scheme *get_scheme(struct fman_keygen *keygen,
+ u8 scheme_id)
+{
+ if (scheme_id >= FM_KG_MAX_NUM_OF_SCHEMES)
+ return NULL;
+ return &keygen->schemes[scheme_id];
+}
+
+/* keygen_bind_port_to_schemes
+ *
+ * Bind the port to schemes
+ *
+ * keygen: KeyGen handle
+ * scheme_id: id of the scheme to bind to
+ * bind: true to bind the port or false to unbind it
+ *
+ * Return: Zero for success or error code in case of failure
+ */
+static int keygen_bind_port_to_schemes(struct fman_keygen *keygen,
+ u8 scheme_id,
+ bool bind)
+{
+ struct fman_kg_regs __iomem *keygen_regs = keygen->keygen_regs;
+ struct keygen_scheme *scheme;
+ u32 ar_reg;
+ u32 schemes_vector = 0;
+ int err;
+
+ scheme = get_scheme(keygen, scheme_id);
+ if (!scheme) {
+ pr_err("Requested Scheme does not exist\n");
+ return -EINVAL;
+ }
+ if (!scheme->used) {
+ pr_err("Cannot bind port to an invalid scheme\n");
+ return -EINVAL;
+ }
+
+ schemes_vector |= 1 << (31 - scheme_id);
+
+ ar_reg = build_ar_bind_scheme(scheme->hw_port_id, false);
+ err = keygen_write_ar_wait(keygen_regs, ar_reg);
+ if (err != 0) {
+ pr_err("Reading Action Register failed\n");
+ return err;
+ }
+
+ keygen_write_sp(keygen_regs, schemes_vector, bind);
+
+ ar_reg = build_ar_bind_scheme(scheme->hw_port_id, true);
+ err = keygen_write_ar_wait(keygen_regs, ar_reg);
+ if (err != 0) {
+ pr_err("Writing Action Register failed\n");
+ return err;
+ }
+
+ return 0;
+}
+
+/* keygen_scheme_setup
+ *
+ * Setup the scheme according to required configuration
+ *
+ * keygen: KeyGen handle
+ * scheme_id: scheme ID
+ * enable: true to enable scheme or false to disable it
+ *
+ * Return: Zero for success or error code in case of failure
+ */
+static int keygen_scheme_setup(struct fman_keygen *keygen, u8 scheme_id,
+ bool enable)
+{
+ struct fman_kg_regs __iomem *keygen_regs = keygen->keygen_regs;
+ struct fman_kg_scheme_regs scheme_regs;
+ struct keygen_scheme *scheme;
+ u32 tmp_reg;
+ int err;
+
+ scheme = get_scheme(keygen, scheme_id);
+ if (!scheme) {
+ pr_err("Requested Scheme does not exist\n");
+ return -EINVAL;
+ }
+ if (enable && scheme->used) {
+ pr_err("The requested Scheme is already used\n");
+ return -EINVAL;
+ }
+
+ /* Clear scheme registers */
+ memset(&scheme_regs, 0, sizeof(struct fman_kg_scheme_regs));
+
+ /* Setup all scheme registers: */
+ tmp_reg = 0;
+
+ if (enable) {
+ /* Enable Scheme */
+ tmp_reg |= KG_SCH_MODE_EN;
+ /* Enqueue frame NIA */
+ tmp_reg |= ENQUEUE_KG_DFLT_NIA;
+ }
+
+ scheme_regs.kgse_mode = tmp_reg;
+
+ scheme_regs.kgse_mv = scheme->match_vector;
+
+ /* Scheme don't override StorageProfile:
+ * valid only for DPAA_VERSION >= 11
+ */
+ scheme_regs.kgse_vsp = KG_SCH_VSP_NO_KSP_EN;
+
+ /* Configure Hard-Coded Rx Hashing: */
+
+ if (scheme->use_hashing) {
+ /* configure kgse_ekfc */
+ scheme_regs.kgse_ekfc = DEFAULT_HASH_KEY_EXTRACT_FIELDS;
+
+ /* configure kgse_ekdv */
+ tmp_reg = 0;
+ tmp_reg |= (KG_SCH_DEF_USE_KGSE_DV_0 <<
+ KG_SCH_DEF_IP_ADDR_SHIFT);
+ tmp_reg |= (KG_SCH_DEF_USE_KGSE_DV_1 <<
+ KG_SCH_DEF_L4_PORT_SHIFT);
+ scheme_regs.kgse_ekdv = tmp_reg;
+
+ /* configure kgse_dv0 */
+ scheme_regs.kgse_dv0 = DEFAULT_HASH_KEY_IPv4_ADDR;
+ /* configure kgse_dv1 */
+ scheme_regs.kgse_dv1 = DEFAULT_HASH_KEY_L4_PORT;
+
+ /* configure kgse_hc */
+ tmp_reg = 0;
+ tmp_reg |= ((scheme->hash_fqid_count - 1) <<
+ DEFAULT_HASH_DIST_FQID_SHIFT);
+ tmp_reg |= scheme->hashShift << KG_SCH_HASH_CONFIG_SHIFT_SHIFT;
+
+ if (scheme->symmetric_hash) {
+ /* Normally extraction key should be verified if
+ * complies with symmetric hash
+ * But because extraction is hard-coded, we are sure
+ * the key is symmetric
+ */
+ tmp_reg |= KG_SCH_HASH_CONFIG_SYM;
+ }
+ scheme_regs.kgse_hc = tmp_reg;
+ } else {
+ scheme_regs.kgse_ekfc = 0;
+ scheme_regs.kgse_hc = 0;
+ scheme_regs.kgse_ekdv = 0;
+ scheme_regs.kgse_dv0 = 0;
+ scheme_regs.kgse_dv1 = 0;
+ }
+
+ /* configure kgse_fqb: Scheme FQID base */
+ tmp_reg = 0;
+ tmp_reg |= scheme->base_fqid;
+ scheme_regs.kgse_fqb = tmp_reg;
+
+ /* features not used by hard-coded configuration */
+ scheme_regs.kgse_bmch = 0;
+ scheme_regs.kgse_bmcl = 0;
+ scheme_regs.kgse_spc = 0;
+
+ /* Write scheme registers */
+ err = keygen_write_scheme(keygen_regs, scheme_id, &scheme_regs, true);
+ if (err != 0) {
+ pr_err("Writing scheme registers failed\n");
+ return err;
+ }
+
+ /* Update used field for Scheme */
+ scheme->used = enable;
+
+ return 0;
+}
+
+/* keygen_init
+ *
+ * KeyGen initialization:
+ * Initializes and enables KeyGen, allocate driver memory, setup registers,
+ * clear port bindings, invalidate all schemes
+ *
+ * keygen_regs: KeyGen registers base address
+ *
+ * Return: Handle to KeyGen driver
+ */
+struct fman_keygen *keygen_init(struct fman_kg_regs __iomem *keygen_regs)
+{
+ struct fman_keygen *keygen;
+ u32 ar;
+ int i;
+
+ /* Allocate memory for KeyGen driver */
+ keygen = kzalloc(sizeof(*keygen), GFP_KERNEL);
+ if (!keygen)
+ return NULL;
+
+ keygen->keygen_regs = keygen_regs;
+
+ /* KeyGen initialization (for Master partition):
+ * Setup KeyGen registers
+ */
+ iowrite32be(ENQUEUE_KG_DFLT_NIA, &keygen_regs->fmkg_gcr);
+
+ iowrite32be(FM_EX_KG_DOUBLE_ECC | FM_EX_KG_KEYSIZE_OVERFLOW,
+ &keygen_regs->fmkg_eer);
+
+ iowrite32be(0, &keygen_regs->fmkg_fdor);
+ iowrite32be(0, &keygen_regs->fmkg_gdv0r);
+ iowrite32be(0, &keygen_regs->fmkg_gdv1r);
+
+ /* Clear binding between ports to schemes and classification plans
+ * so that all ports are not bound to any scheme/classification plan
+ */
+ for (i = 0; i < FMAN_MAX_NUM_OF_HW_PORTS; i++) {
+ /* Clear all pe sp schemes registers */
+ keygen_write_sp(keygen_regs, 0xffffffff, false);
+ ar = build_ar_bind_scheme(i, true);
+ keygen_write_ar_wait(keygen_regs, ar);
+
+ /* Clear all pe cpp classification plans registers */
+ keygen_write_cpp(keygen_regs, 0);
+ ar = build_ar_bind_cls_plan(i, true);
+ keygen_write_ar_wait(keygen_regs, ar);
+ }
+
+ /* Enable all scheme interrupts */
+ iowrite32be(0xFFFFFFFF, &keygen_regs->fmkg_seer);
+ iowrite32be(0xFFFFFFFF, &keygen_regs->fmkg_seeer);
+
+ /* Enable KeyGen */
+ iowrite32be(ioread32be(&keygen_regs->fmkg_gcr) | FM_KG_KGGCR_EN,
+ &keygen_regs->fmkg_gcr);
+
+ return keygen;
+}
+EXPORT_SYMBOL(keygen_init);
+
+/* keygen_port_hashing_init
+ *
+ * Initializes a port for Rx Hashing with specified configuration parameters
+ *
+ * keygen: KeyGen handle
+ * hw_port_id: HW Port ID
+ * hash_base_fqid: Hashing Base FQID used for spreading
+ * hash_size: Hashing size
+ *
+ * Return: Zero for success or error code in case of failure
+ */
+int keygen_port_hashing_init(struct fman_keygen *keygen, u8 hw_port_id,
+ u32 hash_base_fqid, u32 hash_size)
+{
+ struct keygen_scheme *scheme;
+ u8 scheme_id;
+ int err;
+
+ /* Validate Scheme configuration parameters */
+ if (hash_base_fqid == 0 || (hash_base_fqid & ~0x00FFFFFF)) {
+ pr_err("Base FQID must be between 1 and 2^24-1\n");
+ return -EINVAL;
+ }
+ if (hash_size == 0 || (hash_size & (hash_size - 1)) != 0) {
+ pr_err("Hash size must be power of two\n");
+ return -EINVAL;
+ }
+
+ /* Find a free scheme */
+ err = get_free_scheme_id(keygen, &scheme_id);
+ if (err) {
+ pr_err("The maximum number of available Schemes has been exceeded\n");
+ return -EINVAL;
+ }
+
+ /* Create and configure Hard-Coded Scheme: */
+
+ scheme = get_scheme(keygen, scheme_id);
+ if (!scheme) {
+ pr_err("Requested Scheme does not exist\n");
+ return -EINVAL;
+ }
+ if (scheme->used) {
+ pr_err("The requested Scheme is already used\n");
+ return -EINVAL;
+ }
+
+ /* Clear all scheme fields because the scheme may have been
+ * previously used
+ */
+ memset(scheme, 0, sizeof(struct keygen_scheme));
+
+ /* Setup scheme: */
+ scheme->hw_port_id = hw_port_id;
+ scheme->use_hashing = true;
+ scheme->base_fqid = hash_base_fqid;
+ scheme->hash_fqid_count = hash_size;
+ scheme->symmetric_hash = DEFAULT_SYMMETRIC_HASH;
+ scheme->hashShift = DEFAULT_HASH_SHIFT;
+
+ /* All Schemes in hard-coded configuration
+ * are Indirect Schemes
+ */
+ scheme->match_vector = 0;
+
+ err = keygen_scheme_setup(keygen, scheme_id, true);
+ if (err != 0) {
+ pr_err("Scheme setup failed\n");
+ return err;
+ }
+
+ /* Bind Rx port to Scheme */
+ err = keygen_bind_port_to_schemes(keygen, scheme_id, true);
+ if (err != 0) {
+ pr_err("Binding port to schemes failed\n");
+ return err;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(keygen_port_hashing_init);
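
The two sanity checks at the top of keygen_port_hashing_init() above exist because the KeyGen hash result is used to pick one of hash_size frame queues starting at hash_base_fqid: the base must fit in the 24-bit FQID field, and the count must be a power of two so it reduces to a simple bit mask. The stand-alone C sketch below only illustrates that arithmetic; it is an assumption about the spreading scheme, not a reproduction of the register math in keygen_scheme_setup().

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

/* Same test as the driver: zero and non-powers-of-two are rejected */
static bool is_power_of_two(uint32_t n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

/* Illustrative spreading: pick one of fq_count queues, starting at
 * base_fqid, using the low bits of the KeyGen hash result.
 */
static uint32_t spread_fqid(uint32_t base_fqid, uint32_t fq_count, uint32_t hash)
{
	return base_fqid + (hash & (fq_count - 1));
}

int main(void)
{
	uint32_t base = 0x400, count = 128;	/* example values */

	if (base == 0 || (base & ~0x00FFFFFFu) || !is_power_of_two(count))
		return 1;

	printf("hash 0xdeadbeef -> FQID %#x\n",
	       spread_fqid(base, count, 0xdeadbeef));
	return 0;
}
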
diff --git a/drivers/net/ethernet/freescale/fman/fman_keygen.h b/drivers/net/ethernet/freescale/fman/fman_keygen.h
new file mode 100644
index 000000000000..c4640de3f4cb
--- /dev/null
+++ b/drivers/net/ethernet/freescale/fman/fman_keygen.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2017 NXP
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of NXP nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NXP ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL NXP BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __KEYGEN_H
+#define __KEYGEN_H
+
+#include <linux/io.h>
+
+struct fman_keygen;
+struct fman_kg_regs;
+
+struct fman_keygen *keygen_init(struct fman_kg_regs __iomem *keygen_regs);
+
+int keygen_port_hashing_init(struct fman_keygen *keygen, u8 hw_port_id,
+ u32 hash_base_fqid, u32 hash_size);
+
+#endif /* __KEYGEN_H */
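
The header keeps the KeyGen object opaque. The fman->keygen handle that fman_port_init() reads further down must be populated outside this excerpt by calling keygen_init() once per FMan instance. The sketch below shows that wiring under stated assumptions: the helper name is hypothetical and the derivation of kg_regs from the FMan register block is omitted.

#include "fman.h"
#include "fman_keygen.h"

/* Sketch only: kg_regs would be derived from the FMan register block
 * by the FMan driver; the mapping/offset details are not shown here.
 */
static int example_keygen_setup(struct fman *fman,
				struct fman_kg_regs __iomem *kg_regs)
{
	fman->keygen = keygen_init(kg_regs);
	if (!fman->keygen)	/* keygen_init() returns NULL on allocation failure */
		return -ENOMEM;

	return 0;
}
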
diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c
index 57bf44fa16a1..1789b206be58 100644
--- a/drivers/net/ethernet/freescale/fman/fman_port.c
+++ b/drivers/net/ethernet/freescale/fman/fman_port.c
@@ -32,10 +32,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include "fman_port.h"
-#include "fman.h"
-#include "fman_sp.h"
-
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/module.h>
@@ -45,6 +41,11 @@
#include <linux/delay.h>
#include <linux/libfdt_env.h>
+#include "fman.h"
+#include "fman_port.h"
+#include "fman_sp.h"
+#include "fman_keygen.h"
+
/* Queue ID */
#define DFLT_FQ_ID 0x00FFFFFF
@@ -184,6 +185,7 @@
#define NIA_ENG_QMI_ENQ 0x00540000
#define NIA_ENG_QMI_DEQ 0x00580000
#define NIA_ENG_HWP 0x00440000
+#define NIA_ENG_HWK 0x00480000
#define NIA_BMI_AC_ENQ_FRAME 0x00000002
#define NIA_BMI_AC_TX_RELEASE 0x000002C0
#define NIA_BMI_AC_RELEASE 0x000000C0
@@ -394,6 +396,8 @@ struct fman_port_bpools {
struct fman_port_cfg {
u32 dflt_fqid;
u32 err_fqid;
+ u32 pcd_base_fqid;
+ u32 pcd_fqs_count;
u8 deq_sp;
bool deq_high_priority;
enum fman_port_deq_type deq_type;
@@ -1271,6 +1275,10 @@ static void set_rx_dflt_cfg(struct fman_port *port,
port_params->specific_params.rx_params.err_fqid;
port->cfg->dflt_fqid =
port_params->specific_params.rx_params.dflt_fqid;
+ port->cfg->pcd_base_fqid =
+ port_params->specific_params.rx_params.pcd_base_fqid;
+ port->cfg->pcd_fqs_count =
+ port_params->specific_params.rx_params.pcd_fqs_count;
}
static void set_tx_dflt_cfg(struct fman_port *port,
@@ -1398,6 +1406,24 @@ err_port_cfg:
EXPORT_SYMBOL(fman_port_config);
/**
+ * fman_port_use_kg_hash
+ * port: A pointer to a FM Port module.
+ * enable: when true, set the HW KeyGen as the HW Parser next engine and
+ * enable KeyGen hashing of Rx traffic; when false, set the BMI (bypass)
+ */
+void fman_port_use_kg_hash(struct fman_port *port, bool enable)
+{
+ if (enable)
+ /* After the Parser, frames go to KeyGen */
+ iowrite32be(NIA_ENG_HWK, &port->bmi_regs->rx.fmbm_rfpne);
+ else
+ /* After the Parser, frames go to BMI */
+ iowrite32be(NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME,
+ &port->bmi_regs->rx.fmbm_rfpne);
+}
+EXPORT_SYMBOL(fman_port_use_kg_hash);
+
+/**
* fman_port_init
* port: A pointer to a FM Port module.
* Initializes the FM PORT module by defining the software structure and
@@ -1407,9 +1433,10 @@ EXPORT_SYMBOL(fman_port_config);
*/
int fman_port_init(struct fman_port *port)
{
+ struct fman_port_init_params params;
+ struct fman_keygen *keygen;
struct fman_port_cfg *cfg;
int err;
- struct fman_port_init_params params;
if (is_init_done(port->cfg))
return -EINVAL;
@@ -1472,6 +1499,17 @@ int fman_port_init(struct fman_port *port)
if (err)
return err;
+ if (port->cfg->pcd_fqs_count) {
+ keygen = port->dts_params.fman->keygen;
+ err = keygen_port_hashing_init(keygen, port->port_id,
+ port->cfg->pcd_base_fqid,
+ port->cfg->pcd_fqs_count);
+ if (err)
+ return err;
+
+ fman_port_use_kg_hash(port, true);
+ }
+
kfree(port->cfg);
port->cfg = NULL;
@@ -1682,6 +1720,17 @@ u32 fman_port_get_qman_channel_id(struct fman_port *port)
}
EXPORT_SYMBOL(fman_port_get_qman_channel_id);
+int fman_port_get_hash_result_offset(struct fman_port *port, u32 *offset)
+{
+ if (port->buffer_offsets.hash_result_offset == ILLEGAL_BASE)
+ return -EINVAL;
+
+ *offset = port->buffer_offsets.hash_result_offset;
+
+ return 0;
+}
+EXPORT_SYMBOL(fman_port_get_hash_result_offset);
+
static int fman_port_probe(struct platform_device *of_dev)
{
struct fman_port *port;
@@ -1720,8 +1769,8 @@ static int fman_port_probe(struct platform_device *of_dev)
err = of_property_read_u32(port_node, "cell-index", &val);
if (err) {
- dev_err(port->dev, "%s: reading cell-index for %s failed\n",
- __func__, port_node->full_name);
+ dev_err(port->dev, "%s: reading cell-index for %pOF failed\n",
+ __func__, port_node);
err = -EINVAL;
goto return_err;
}
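
With the hunks above, enabling Rx hashing on a port becomes a configuration matter: a caller fills the two new PCD fields, and fman_port_init() then sets up a KeyGen scheme and redirects the Parser next-engine via fman_port_use_kg_hash(). A minimal sketch of that call sequence follows; the wrapper function itself is hypothetical, while the field and function names come from this patch.

#include "fman_port.h"

/* Hypothetical helper: configure an Rx port so that fman_port_init()
 * enables KeyGen hashing over pcd_count frame queues at pcd_base.
 */
static int example_rx_port_with_hashing(struct fman_port *port,
					struct fman_port_params *params,
					u32 pcd_base, u32 pcd_count)
{
	int err;

	/* New fields added by this patch; count must be a power of two */
	params->specific_params.rx_params.pcd_base_fqid = pcd_base;
	params->specific_params.rx_params.pcd_fqs_count = pcd_count;

	err = fman_port_config(port, params);
	if (err)
		return err;

	/* fman_port_init() sees pcd_fqs_count != 0, calls
	 * keygen_port_hashing_init() and then fman_port_use_kg_hash(port, true)
	 */
	return fman_port_init(port);
}
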
diff --git a/drivers/net/ethernet/freescale/fman/fman_port.h b/drivers/net/ethernet/freescale/fman/fman_port.h
index 8ba901737048..e86ca6a34e4e 100644
--- a/drivers/net/ethernet/freescale/fman/fman_port.h
+++ b/drivers/net/ethernet/freescale/fman/fman_port.h
@@ -100,6 +100,9 @@ struct fman_port;
struct fman_port_rx_params {
u32 err_fqid; /* Error Queue Id. */
u32 dflt_fqid; /* Default Queue Id. */
+ u32 pcd_base_fqid; /* PCD base Queue Id. */
+ u32 pcd_fqs_count; /* Number of PCD FQs. */
+
/* Which external buffer pools are used
* (up to FMAN_PORT_MAX_EXT_POOLS_NUM), and their sizes.
*/
@@ -134,6 +137,8 @@ struct fman_port_params {
int fman_port_config(struct fman_port *port, struct fman_port_params *params);
+void fman_port_use_kg_hash(struct fman_port *port, bool enable);
+
int fman_port_init(struct fman_port *port);
int fman_port_cfg_buf_prefix_content(struct fman_port *port,
@@ -146,6 +151,8 @@ int fman_port_enable(struct fman_port *port);
u32 fman_port_get_qman_channel_id(struct fman_port *port);
+int fman_port_get_hash_result_offset(struct fman_port *port, u32 *offset);
+
struct fman_port *fman_port_bind(struct device *dev);
#endif /* __FMAN_PORT_H */
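
On the receive side, fman_port_get_hash_result_offset() tells the Ethernet driver where the KeyGen hash result sits inside the buffer prefix, so it can be handed to the stack and reused for flow steering. The sketch below shows one plausible use in an Rx handler; the function name and the surrounding skb handling are illustrative assumptions rather than the actual consumer code.

#include <linux/skbuff.h>
#include "fman_port.h"

/* Illustrative: copy the 32-bit KeyGen hash result from the buffer
 * prefix into the skb so the stack can reuse it.
 */
static void example_set_skb_hash(struct fman_port *port,
				 struct sk_buff *skb, const void *buf_start)
{
	u32 hash_offset;

	/* Returns -EINVAL when the prefix does not carry a hash result */
	if (fman_port_get_hash_result_offset(port, &hash_offset))
		return;

	skb_set_hash(skb,
		     be32_to_cpu(*(const __be32 *)(buf_start + hash_offset)),
		     PKT_HASH_TYPE_L3);
}
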
diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
index 1c7da16ad0ff..387eb4a88b72 100644
--- a/drivers/net/ethernet/freescale/fman/mac.c
+++ b/drivers/net/ethernet/freescale/fman/mac.c
@@ -700,8 +700,8 @@ static int mac_probe(struct platform_device *_of_dev)
priv->internal_phy_node = of_parse_phandle(mac_node,
"pcsphy-handle", 0);
} else {
- dev_err(dev, "MAC node (%s) contains unsupported MAC\n",
- mac_node->full_name);
+ dev_err(dev, "MAC node (%pOF) contains unsupported MAC\n",
+ mac_node);
err = -EINVAL;
goto _return;
}
@@ -714,16 +714,15 @@ static int mac_probe(struct platform_device *_of_dev)
/* Get the FM node */
dev_node = of_get_parent(mac_node);
if (!dev_node) {
- dev_err(dev, "of_get_parent(%s) failed\n",
- mac_node->full_name);
+ dev_err(dev, "of_get_parent(%pOF) failed\n",
+ mac_node);
err = -EINVAL;
goto _return_dev_set_drvdata;
}
of_dev = of_find_device_by_node(dev_node);
if (!of_dev) {
- dev_err(dev, "of_find_device_by_node(%s) failed\n",
- dev_node->full_name);
+ dev_err(dev, "of_find_device_by_node(%pOF) failed\n", dev_node);
err = -EINVAL;
goto _return_of_node_put;
}
@@ -731,8 +730,7 @@ static int mac_probe(struct platform_device *_of_dev)
/* Get the FMan cell-index */
err = of_property_read_u32(dev_node, "cell-index", &val);
if (err) {
- dev_err(dev, "failed to read cell-index for %s\n",
- dev_node->full_name);
+ dev_err(dev, "failed to read cell-index for %pOF\n", dev_node);
err = -EINVAL;
goto _return_of_node_put;
}
@@ -741,7 +739,7 @@ static int mac_probe(struct platform_device *_of_dev)
priv->fman = fman_bind(&of_dev->dev);
if (!priv->fman) {
- dev_err(dev, "fman_bind(%s) failed\n", dev_node->full_name);
+ dev_err(dev, "fman_bind(%pOF) failed\n", dev_node);
err = -ENODEV;
goto _return_of_node_put;
}
@@ -751,8 +749,8 @@ static int mac_probe(struct platform_device *_of_dev)
/* Get the address of the memory mapped registers */
err = of_address_to_resource(mac_node, 0, &res);
if (err < 0) {
- dev_err(dev, "of_address_to_resource(%s) = %d\n",
- mac_node->full_name, err);
+ dev_err(dev, "of_address_to_resource(%pOF) = %d\n",
+ mac_node, err);
goto _return_dev_set_drvdata;
}
@@ -786,8 +784,7 @@ static int mac_probe(struct platform_device *_of_dev)
/* Get the cell-index */
err = of_property_read_u32(mac_node, "cell-index", &val);
if (err) {
- dev_err(dev, "failed to read cell-index for %s\n",
- mac_node->full_name);
+ dev_err(dev, "failed to read cell-index for %pOF\n", mac_node);
err = -EINVAL;
goto _return_dev_set_drvdata;
}
@@ -796,8 +793,7 @@ static int mac_probe(struct platform_device *_of_dev)
/* Get the MAC address */
mac_addr = of_get_mac_address(mac_node);
if (!mac_addr) {
- dev_err(dev, "of_get_mac_address(%s) failed\n",
- mac_node->full_name);
+ dev_err(dev, "of_get_mac_address(%pOF) failed\n", mac_node);
err = -EINVAL;
goto _return_dev_set_drvdata;
}
@@ -806,15 +802,15 @@ static int mac_probe(struct platform_device *_of_dev)
/* Get the port handles */
nph = of_count_phandle_with_args(mac_node, "fsl,fman-ports", NULL);
if (unlikely(nph < 0)) {
- dev_err(dev, "of_count_phandle_with_args(%s, fsl,fman-ports) failed\n",
- mac_node->full_name);
+ dev_err(dev, "of_count_phandle_with_args(%pOF, fsl,fman-ports) failed\n",
+ mac_node);
err = nph;
goto _return_dev_set_drvdata;
}
if (nph != ARRAY_SIZE(mac_dev->port)) {
- dev_err(dev, "Not supported number of fman-ports handles of mac node %s from device tree\n",
- mac_node->full_name);
+ dev_err(dev, "Not supported number of fman-ports handles of mac node %pOF from device tree\n",
+ mac_node);
err = -EINVAL;
goto _return_dev_set_drvdata;
}
@@ -823,24 +819,24 @@ static int mac_probe(struct platform_device *_of_dev)
/* Find the port node */
dev_node = of_parse_phandle(mac_node, "fsl,fman-ports", i);
if (!dev_node) {
- dev_err(dev, "of_parse_phandle(%s, fsl,fman-ports) failed\n",
- mac_node->full_name);
+ dev_err(dev, "of_parse_phandle(%pOF, fsl,fman-ports) failed\n",
+ mac_node);
err = -EINVAL;
goto _return_of_node_put;
}
of_dev = of_find_device_by_node(dev_node);
if (!of_dev) {
- dev_err(dev, "of_find_device_by_node(%s) failed\n",
- dev_node->full_name);
+ dev_err(dev, "of_find_device_by_node(%pOF) failed\n",
+ dev_node);
err = -EINVAL;
goto _return_of_node_put;
}
mac_dev->port[i] = fman_port_bind(&of_dev->dev);
if (!mac_dev->port[i]) {
- dev_err(dev, "dev_get_drvdata(%s) failed\n",
- dev_node->full_name);
+ dev_err(dev, "dev_get_drvdata(%pOF) failed\n",
+ dev_node);
err = -EINVAL;
goto _return_of_node_put;
}
@@ -851,8 +847,8 @@ static int mac_probe(struct platform_device *_of_dev)
phy_if = of_get_phy_mode(mac_node);
if (phy_if < 0) {
dev_warn(dev,
- "of_get_phy_mode() for %s failed. Defaulting to SGMII\n",
- mac_node->full_name);
+ "of_get_phy_mode() for %pOF failed. Defaulting to SGMII\n",
+ mac_node);
phy_if = PHY_INTERFACE_MODE_SGMII;
}
priv->phy_if = phy_if;
diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
index 1f015edcca22..c8e5d889bd81 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
@@ -100,7 +100,7 @@ static inline void mdc(struct mdiobb_ctrl *ctrl, int what)
in_be32(bitbang->dat);
}
-static struct mdiobb_ops bb_ops = {
+static const struct mdiobb_ops bb_ops = {
.owner = THIS_MODULE,
.set_mdc = mdc,
.set_mdio_dir = mdio_dir,
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index a10de1e9c157..80ad16acf0f1 100644
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -267,8 +267,8 @@ static void ucc_configure(phys_addr_t start, phys_addr_t end)
ret = of_address_to_resource(np, 0, &res);
if (ret < 0) {
- pr_debug("fsl-pq-mdio: no address range in node %s\n",
- np->full_name);
+ pr_debug("fsl-pq-mdio: no address range in node %pOF\n",
+ np);
continue;
}
@@ -280,8 +280,8 @@ static void ucc_configure(phys_addr_t start, phys_addr_t end)
if (!iprop) {
iprop = of_get_property(np, "device-id", NULL);
if (!iprop) {
- pr_debug("fsl-pq-mdio: no UCC ID in node %s\n",
- np->full_name);
+ pr_debug("fsl-pq-mdio: no UCC ID in node %pOF\n",
+ np);
continue;
}
}
@@ -293,8 +293,8 @@ static void ucc_configure(phys_addr_t start, phys_addr_t end)
* numbered from 1, not 0.
*/
if (ucc_set_qe_mux_mii_mng(id - 1) < 0) {
- pr_debug("fsl-pq-mdio: invalid UCC ID in node %s\n",
- np->full_name);
+ pr_debug("fsl-pq-mdio: invalid UCC ID in node %pOF\n",
+ np);
continue;
}
@@ -442,8 +442,8 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev)
if (data->get_tbipa) {
for_each_child_of_node(np, tbi) {
if (strcmp(tbi->type, "tbi-phy") == 0) {
- dev_dbg(&pdev->dev, "found TBI PHY node %s\n",
- strrchr(tbi->full_name, '/') + 1);
+ dev_dbg(&pdev->dev, "found TBI PHY node %pOFP\n",
+ tbi);
break;
}
}
@@ -454,8 +454,8 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev)
if (!prop) {
dev_err(&pdev->dev,
- "missing 'reg' property in node %s\n",
- tbi->full_name);
+ "missing 'reg' property in node %pOF\n",
+ tbi);
err = -EBUSY;
goto error;
}
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index c4b4b0a1bbf0..5be52d89b182 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -3687,7 +3687,7 @@ static noinline void gfar_update_link_state(struct gfar_private *priv)
u32 tempval1 = gfar_read(&regs->maccfg1);
u32 tempval = gfar_read(&regs->maccfg2);
u32 ecntrl = gfar_read(&regs->ecntrl);
- u32 tx_flow_oldval = (tempval & MACCFG1_TX_FLOW);
+ u32 tx_flow_oldval = (tempval1 & MACCFG1_TX_FLOW);
if (phydev->duplex != priv->oldduplex) {
if (!(phydev->duplex))
diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c
index 721be13081f9..544114281ea7 100644
--- a/drivers/net/ethernet/freescale/gianfar_ptp.c
+++ b/drivers/net/ethernet/freescale/gianfar_ptp.c
@@ -411,7 +411,7 @@ static int ptp_gianfar_enable(struct ptp_clock_info *ptp,
return -EOPNOTSUPP;
}
-static struct ptp_clock_info ptp_gianfar_caps = {
+static const struct ptp_clock_info ptp_gianfar_caps = {
.owner = THIS_MODULE,
.name = "gianfar clock",
.max_adj = 512000,