author     Linus Torvalds <torvalds@linux-foundation.org>  2018-12-27 13:04:52 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2018-12-27 13:04:52 -0800
commit     e0c38a4d1f196a4b17d2eba36afff8f656a4f1de (patch)
tree       b26a69fabef0160adb127416a9744217700feeb7 /drivers/net/dsa/bcm_sf2_cfp.c
parent     7f9f852c75e7d776b078813586c76a2bc7dca993 (diff)
parent     90cadbbf341dd5b2df991c33a6bd6341f3a53788 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:

 1) New ipset extensions for matching on destination MAC addresses, from Stefano Brivio.

 2) Add ipv4 ttl and tos, plus ipv6 flow label and hop limit offloads to nfp driver. From Stefano Brivio.

 3) Implement GRO for plain UDP sockets, from Paolo Abeni.

 4) Lots of work from Michał Mirosław to eliminate the VLAN_TAG_PRESENT bit so that we could support the entire vlan_tci value.

 5) Rework the IPSEC policy lookups to better optimize more usecases, from Florian Westphal.

 6) Infrastructure changes eliminating direct manipulation of SKB lists wherever possible, and to always use the appropriate SKB list helpers. This work is still ongoing...

 7) Lots of PHY driver and state machine improvements and simplifications, from Heiner Kallweit.

 8) Various TSO deferral refinements, from Eric Dumazet.

 9) Add ntuple filter support to aquantia driver, from Dmitry Bogdanov.

10) Batch dropping of XDP packets in tuntap, from Jason Wang.

11) Lots of cleanups and improvements to the r8169 driver from Heiner Kallweit, including support for ->xmit_more. This driver has been getting some much needed love since he started working on it.

12) Lots of new forwarding selftests from Petr Machata.

13) Enable VXLAN learning in mlxsw driver, from Ido Schimmel.

14) Packed ring support for virtio, from Tiwei Bie.

15) Add new Aquantia AQtion USB driver, from Dmitry Bezrukov.

16) Add XDP support to dpaa2-eth driver, from Ioana Ciocoi Radulescu.

17) Implement coalescing on TCP backlog queue, from Eric Dumazet.

18) Implement carrier change in tun driver, from Nicolas Dichtel.

19) Support msg_zerocopy in UDP, from Willem de Bruijn.

20) Significantly improve garbage collection of neighbor objects when the table has many PERMANENT entries, from David Ahern.

21) Remove egdev usage from nfp and mlx5, and remove the facility completely from the tree as it no longer has any users. From Oz Shlomo and others.

22) Add a NETDEV_PRE_CHANGEADDR so that drivers can veto the change and therefore abort the operation before the commit phase (which is the NETDEV_CHANGEADDR event). From Petr Machata.

23) Add indirect call wrappers to avoid retpoline overhead, and use them in the GRO code paths. From Paolo Abeni.

24) Add support for netlink FDB get operations, from Roopa Prabhu.

25) Support bloom filter in mlxsw driver, from Nir Dotan.

26) Add SKB extension infrastructure. This consolidates the handling of the auxiliary SKB data used by IPSEC and bridge netfilter, and is designed to support the needs of MPTCP which could be integrated in the future.

27) Lots of XDP TX optimizations in mlx5 from Tariq Toukan.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1845 commits)
  net: dccp: fix kernel crash on module load
  drivers/net: appletalk/cops: remove redundant if statement and mask
  bnx2x: Fix NULL pointer dereference in bnx2x_del_all_vlans() on some hw
  net/net_namespace: Check the return value of register_pernet_subsys()
  net/netlink_compat: Fix a missing check of nla_parse_nested
  ieee802154: lowpan_header_create check must check daddr
  net/mlx4_core: drop useless LIST_HEAD
  mlxsw: spectrum: drop useless LIST_HEAD
  net/mlx5e: drop useless LIST_HEAD
  iptunnel: Set tun_flags in the iptunnel_metadata_reply from src
  net/mlx5e: fix semicolon.cocci warnings
  staging: octeon: fix build failure with XFRM enabled
  net: Revert recent Spectre-v1 patches.
  can: af_can: Fix Spectre v1 vulnerability
  packet: validate address length if non-zero
  nfc: af_nfc: Fix Spectre v1 vulnerability
  phonet: af_phonet: Fix Spectre v1 vulnerability
  net: core: Fix Spectre v1 vulnerability
  net: minor cleanup in skb_ext_add()
  net: drop the unused helper skb_ext_get()
  ...
Diffstat (limited to 'drivers/net/dsa/bcm_sf2_cfp.c')
-rw-r--r--  drivers/net/dsa/bcm_sf2_cfp.c | 497
1 file changed, 182 insertions(+), 315 deletions(-)
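
The patch below stops reading CFP entries back from hardware for ethtool get/delete and instead keeps a software cache: every accepted rule is stored in a struct cfp_rule list keyed by (port, location), which is also what allows the new suspend/resume re-programming. The following standalone userspace sketch mirrors that bookkeeping pattern only; the demo_* names, the hand-rolled list and the simplified types are placeholders and are not driver code.

/*
 * Illustrative sketch of the rule cache introduced by this patch:
 * rules are remembered in software and looked up by (port, location)
 * instead of being read back from the CFP TCAM.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_rule {
	int port;
	uint32_t location;	/* mirrors ethtool_rx_flow_spec::location */
	struct demo_rule *next;
};

static struct demo_rule *rules_list;

/* Rough counterpart of bcm_sf2_cfp_rule_find(): match on (port, location). */
static struct demo_rule *demo_rule_find(int port, uint32_t location)
{
	struct demo_rule *rule;

	for (rule = rules_list; rule; rule = rule->next)
		if (rule->port == port && rule->location == location)
			return rule;
	return NULL;
}

/* Rough counterpart of the list insertion done at the end of rule_set(). */
static int demo_rule_insert(int port, uint32_t location)
{
	struct demo_rule *rule;

	if (demo_rule_find(port, location))
		return -1;		/* the driver returns -EEXIST */

	rule = calloc(1, sizeof(*rule));
	if (!rule)
		return -1;		/* the driver returns -ENOMEM */
	rule->port = port;
	rule->location = location;
	rule->next = rules_list;	/* ordering is irrelevant for the demo */
	rules_list = rule;
	return 0;
}

int main(void)
{
	demo_rule_insert(0, 1);
	demo_rule_insert(5, 2);
	printf("rule at (5,2) cached: %s\n",
	       demo_rule_find(5, 2) ? "yes" : "no");
	return 0;
}
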
diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c
index 47c5f272a084..e14663ab6dbc 100644
--- a/drivers/net/dsa/bcm_sf2_cfp.c
+++ b/drivers/net/dsa/bcm_sf2_cfp.c
@@ -20,6 +20,12 @@
#include "bcm_sf2.h"
#include "bcm_sf2_regs.h"
+struct cfp_rule {
+ int port;
+ struct ethtool_rx_flow_spec fs;
+ struct list_head next;
+};
+
struct cfp_udf_slice_layout {
u8 slices[UDFS_PER_SLICE];
u32 mask_value;
@@ -515,6 +521,61 @@ static void bcm_sf2_cfp_slice_ipv6(struct bcm_sf2_priv *priv,
core_writel(priv, reg, offset);
}
+static struct cfp_rule *bcm_sf2_cfp_rule_find(struct bcm_sf2_priv *priv,
+ int port, u32 location)
+{
+ struct cfp_rule *rule = NULL;
+
+ list_for_each_entry(rule, &priv->cfp.rules_list, next) {
+ if (rule->port == port && rule->fs.location == location)
+ break;
+ }
+
+ return rule;
+}
+
+static int bcm_sf2_cfp_rule_cmp(struct bcm_sf2_priv *priv, int port,
+ struct ethtool_rx_flow_spec *fs)
+{
+ struct cfp_rule *rule = NULL;
+ size_t fs_size = 0;
+ int ret = 1;
+
+ if (list_empty(&priv->cfp.rules_list))
+ return ret;
+
+ list_for_each_entry(rule, &priv->cfp.rules_list, next) {
+ ret = 1;
+ if (rule->port != port)
+ continue;
+
+ if (rule->fs.flow_type != fs->flow_type ||
+ rule->fs.ring_cookie != fs->ring_cookie ||
+ rule->fs.m_ext.data[0] != fs->m_ext.data[0])
+ continue;
+
+ switch (fs->flow_type & ~FLOW_EXT) {
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ fs_size = sizeof(struct ethtool_tcpip6_spec);
+ break;
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ fs_size = sizeof(struct ethtool_tcpip4_spec);
+ break;
+ default:
+ continue;
+ }
+
+ ret = memcmp(&rule->fs.h_u, &fs->h_u, fs_size);
+ ret |= memcmp(&rule->fs.m_u, &fs->m_u, fs_size);
+ if (ret == 0)
+ break;
+ }
+
+ return ret;
+}
+
static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
unsigned int port_num,
unsigned int queue_num,
@@ -728,27 +789,14 @@ out_err:
return ret;
}
-static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
- struct ethtool_rx_flow_spec *fs)
+static int bcm_sf2_cfp_rule_insert(struct dsa_switch *ds, int port,
+ struct ethtool_rx_flow_spec *fs)
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
s8 cpu_port = ds->ports[port].cpu_dp->index;
__u64 ring_cookie = fs->ring_cookie;
unsigned int queue_num, port_num;
- int ret = -EINVAL;
-
- /* Check for unsupported extensions */
- if ((fs->flow_type & FLOW_EXT) && (fs->m_ext.vlan_etype ||
- fs->m_ext.data[1]))
- return -EINVAL;
-
- if (fs->location != RX_CLS_LOC_ANY &&
- test_bit(fs->location, priv->cfp.used))
- return -EBUSY;
-
- if (fs->location != RX_CLS_LOC_ANY &&
- fs->location > bcm_sf2_cfp_rule_size(priv))
- return -EINVAL;
+ int ret;
/* This rule is a Wake-on-LAN filter and we must specifically
* target the CPU port in order for it to be working.
@@ -787,12 +835,54 @@ static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
queue_num, fs);
break;
default:
+ ret = -EINVAL;
break;
}
return ret;
}
+static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
+ struct ethtool_rx_flow_spec *fs)
+{
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+ struct cfp_rule *rule = NULL;
+ int ret = -EINVAL;
+
+ /* Check for unsupported extensions */
+ if ((fs->flow_type & FLOW_EXT) && (fs->m_ext.vlan_etype ||
+ fs->m_ext.data[1]))
+ return -EINVAL;
+
+ if (fs->location != RX_CLS_LOC_ANY &&
+ test_bit(fs->location, priv->cfp.used))
+ return -EBUSY;
+
+ if (fs->location != RX_CLS_LOC_ANY &&
+ fs->location > bcm_sf2_cfp_rule_size(priv))
+ return -EINVAL;
+
+ ret = bcm_sf2_cfp_rule_cmp(priv, port, fs);
+ if (ret == 0)
+ return -EEXIST;
+
+ rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ if (!rule)
+ return -ENOMEM;
+
+ ret = bcm_sf2_cfp_rule_insert(ds, port, fs);
+ if (ret) {
+ kfree(rule);
+ return ret;
+ }
+
+ rule->port = port;
+ memcpy(&rule->fs, fs, sizeof(*fs));
+ list_add_tail(&rule->next, &priv->cfp.rules_list);
+
+ return ret;
+}
+
static int bcm_sf2_cfp_rule_del_one(struct bcm_sf2_priv *priv, int port,
u32 loc, u32 *next_loc)
{
@@ -830,19 +920,12 @@ static int bcm_sf2_cfp_rule_del_one(struct bcm_sf2_priv *priv, int port,
return 0;
}
-static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port,
- u32 loc)
+static int bcm_sf2_cfp_rule_remove(struct bcm_sf2_priv *priv, int port,
+ u32 loc)
{
u32 next_loc = 0;
int ret;
- /* Refuse deleting unused rules, and those that are not unique since
- * that could leave IPv6 rules with one of the chained rule in the
- * table.
- */
- if (!test_bit(loc, priv->cfp.unique) || loc == 0)
- return -EINVAL;
-
ret = bcm_sf2_cfp_rule_del_one(priv, port, loc, &next_loc);
if (ret)
return ret;
@@ -854,318 +937,54 @@ static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port,
return ret;
}
-static void bcm_sf2_invert_masks(struct ethtool_rx_flow_spec *flow)
+static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port, u32 loc)
{
- unsigned int i;
-
- for (i = 0; i < sizeof(flow->m_u); i++)
- flow->m_u.hdata[i] ^= 0xff;
-
- flow->m_ext.vlan_etype ^= cpu_to_be16(~0);
- flow->m_ext.vlan_tci ^= cpu_to_be16(~0);
- flow->m_ext.data[0] ^= cpu_to_be32(~0);
- flow->m_ext.data[1] ^= cpu_to_be32(~0);
-}
-
-static int bcm_sf2_cfp_unslice_ipv4(struct bcm_sf2_priv *priv,
- struct ethtool_tcpip4_spec *v4_spec,
- bool mask)
-{
- u32 reg, offset, ipv4;
- u16 src_dst_port;
-
- if (mask)
- offset = CORE_CFP_MASK_PORT(3);
- else
- offset = CORE_CFP_DATA_PORT(3);
-
- reg = core_readl(priv, offset);
- /* src port [15:8] */
- src_dst_port = reg << 8;
-
- if (mask)
- offset = CORE_CFP_MASK_PORT(2);
- else
- offset = CORE_CFP_DATA_PORT(2);
-
- reg = core_readl(priv, offset);
- /* src port [7:0] */
- src_dst_port |= (reg >> 24);
-
- v4_spec->pdst = cpu_to_be16(src_dst_port);
- v4_spec->psrc = cpu_to_be16((u16)(reg >> 8));
-
- /* IPv4 dst [15:8] */
- ipv4 = (reg & 0xff) << 8;
-
- if (mask)
- offset = CORE_CFP_MASK_PORT(1);
- else
- offset = CORE_CFP_DATA_PORT(1);
-
- reg = core_readl(priv, offset);
- /* IPv4 dst [31:16] */
- ipv4 |= ((reg >> 8) & 0xffff) << 16;
- /* IPv4 dst [7:0] */
- ipv4 |= (reg >> 24) & 0xff;
- v4_spec->ip4dst = cpu_to_be32(ipv4);
-
- /* IPv4 src [15:8] */
- ipv4 = (reg & 0xff) << 8;
-
- if (mask)
- offset = CORE_CFP_MASK_PORT(0);
- else
- offset = CORE_CFP_DATA_PORT(0);
- reg = core_readl(priv, offset);
+ struct cfp_rule *rule;
+ int ret;
- /* Once the TCAM is programmed, the mask reflects the slice number
- * being matched, don't bother checking it when reading back the
- * mask spec
+ /* Refuse deleting unused rules, and those that are not unique since
+ * that could leave IPv6 rules with one of the chained rule in the
+ * table.
*/
- if (!mask && !(reg & SLICE_VALID))
+ if (!test_bit(loc, priv->cfp.unique) || loc == 0)
return -EINVAL;
- /* IPv4 src [7:0] */
- ipv4 |= (reg >> 24) & 0xff;
- /* IPv4 src [31:16] */
- ipv4 |= ((reg >> 8) & 0xffff) << 16;
- v4_spec->ip4src = cpu_to_be32(ipv4);
-
- return 0;
-}
-
-static int bcm_sf2_cfp_ipv4_rule_get(struct bcm_sf2_priv *priv, int port,
- struct ethtool_rx_flow_spec *fs)
-{
- struct ethtool_tcpip4_spec *v4_spec = NULL, *v4_m_spec = NULL;
- u32 reg;
- int ret;
-
- reg = core_readl(priv, CORE_CFP_DATA_PORT(6));
-
- switch ((reg & IPPROTO_MASK) >> IPPROTO_SHIFT) {
- case IPPROTO_TCP:
- fs->flow_type = TCP_V4_FLOW;
- v4_spec = &fs->h_u.tcp_ip4_spec;
- v4_m_spec = &fs->m_u.tcp_ip4_spec;
- break;
- case IPPROTO_UDP:
- fs->flow_type = UDP_V4_FLOW;
- v4_spec = &fs->h_u.udp_ip4_spec;
- v4_m_spec = &fs->m_u.udp_ip4_spec;
- break;
- default:
+ rule = bcm_sf2_cfp_rule_find(priv, port, loc);
+ if (!rule)
return -EINVAL;
- }
-
- fs->m_ext.data[0] = cpu_to_be32((reg >> IP_FRAG_SHIFT) & 1);
- v4_spec->tos = (reg >> IPTOS_SHIFT) & IPTOS_MASK;
-
- ret = bcm_sf2_cfp_unslice_ipv4(priv, v4_spec, false);
- if (ret)
- return ret;
-
- return bcm_sf2_cfp_unslice_ipv4(priv, v4_m_spec, true);
-}
-
-static int bcm_sf2_cfp_unslice_ipv6(struct bcm_sf2_priv *priv,
- __be32 *ip6_addr, __be16 *port,
- bool mask)
-{
- u32 reg, tmp, offset;
-
- /* C-Tag [31:24]
- * UDF_n_B8 [23:8] (port)
- * UDF_n_B7 (upper) [7:0] (addr[15:8])
- */
- if (mask)
- offset = CORE_CFP_MASK_PORT(4);
- else
- offset = CORE_CFP_DATA_PORT(4);
- reg = core_readl(priv, offset);
- *port = cpu_to_be32(reg) >> 8;
- tmp = (u32)(reg & 0xff) << 8;
-
- /* UDF_n_B7 (lower) [31:24] (addr[7:0])
- * UDF_n_B6 [23:8] (addr[31:16])
- * UDF_n_B5 (upper) [7:0] (addr[47:40])
- */
- if (mask)
- offset = CORE_CFP_MASK_PORT(3);
- else
- offset = CORE_CFP_DATA_PORT(3);
- reg = core_readl(priv, offset);
- tmp |= (reg >> 24) & 0xff;
- tmp |= (u32)((reg >> 8) << 16);
- ip6_addr[3] = cpu_to_be32(tmp);
- tmp = (u32)(reg & 0xff) << 8;
-
- /* UDF_n_B5 (lower) [31:24] (addr[39:32])
- * UDF_n_B4 [23:8] (addr[63:48])
- * UDF_n_B3 (upper) [7:0] (addr[79:72])
- */
- if (mask)
- offset = CORE_CFP_MASK_PORT(2);
- else
- offset = CORE_CFP_DATA_PORT(2);
- reg = core_readl(priv, offset);
- tmp |= (reg >> 24) & 0xff;
- tmp |= (u32)((reg >> 8) << 16);
- ip6_addr[2] = cpu_to_be32(tmp);
- tmp = (u32)(reg & 0xff) << 8;
- /* UDF_n_B3 (lower) [31:24] (addr[71:64])
- * UDF_n_B2 [23:8] (addr[95:80])
- * UDF_n_B1 (upper) [7:0] (addr[111:104])
- */
- if (mask)
- offset = CORE_CFP_MASK_PORT(1);
- else
- offset = CORE_CFP_DATA_PORT(1);
- reg = core_readl(priv, offset);
- tmp |= (reg >> 24) & 0xff;
- tmp |= (u32)((reg >> 8) << 16);
- ip6_addr[1] = cpu_to_be32(tmp);
- tmp = (u32)(reg & 0xff) << 8;
+ ret = bcm_sf2_cfp_rule_remove(priv, port, loc);
- /* UDF_n_B1 (lower) [31:24] (addr[103:96])
- * UDF_n_B0 [23:8] (addr[127:112])
- * Reserved [7:4]
- * Slice ID [3:2]
- * Slice valid [1:0]
- */
- if (mask)
- offset = CORE_CFP_MASK_PORT(0);
- else
- offset = CORE_CFP_DATA_PORT(0);
- reg = core_readl(priv, offset);
- tmp |= (reg >> 24) & 0xff;
- tmp |= (u32)((reg >> 8) << 16);
- ip6_addr[0] = cpu_to_be32(tmp);
+ list_del(&rule->next);
+ kfree(rule);
- if (!mask && !(reg & SLICE_VALID))
- return -EINVAL;
-
- return 0;
+ return ret;
}
-static int bcm_sf2_cfp_ipv6_rule_get(struct bcm_sf2_priv *priv, int port,
- struct ethtool_rx_flow_spec *fs,
- u32 next_loc)
+static void bcm_sf2_invert_masks(struct ethtool_rx_flow_spec *flow)
{
- struct ethtool_tcpip6_spec *v6_spec = NULL, *v6_m_spec = NULL;
- u32 reg;
- int ret;
-
- /* UDPv6 and TCPv6 both use ethtool_tcpip6_spec so we are fine
- * assuming tcp_ip6_spec here being an union.
- */
- v6_spec = &fs->h_u.tcp_ip6_spec;
- v6_m_spec = &fs->m_u.tcp_ip6_spec;
-
- /* Read the second half first */
- ret = bcm_sf2_cfp_unslice_ipv6(priv, v6_spec->ip6dst, &v6_spec->pdst,
- false);
- if (ret)
- return ret;
-
- ret = bcm_sf2_cfp_unslice_ipv6(priv, v6_m_spec->ip6dst,
- &v6_m_spec->pdst, true);
- if (ret)
- return ret;
-
- /* Read last to avoid next entry clobbering the results during search
- * operations. We would not have the port enabled for this rule, so
- * don't bother checking it.
- */
- (void)core_readl(priv, CORE_CFP_DATA_PORT(7));
-
- /* The slice number is valid, so read the rule we are chained from now
- * which is our first half.
- */
- bcm_sf2_cfp_rule_addr_set(priv, next_loc);
- ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | TCAM_SEL);
- if (ret)
- return ret;
-
- reg = core_readl(priv, CORE_CFP_DATA_PORT(6));
-
- switch ((reg & IPPROTO_MASK) >> IPPROTO_SHIFT) {
- case IPPROTO_TCP:
- fs->flow_type = TCP_V6_FLOW;
- break;
- case IPPROTO_UDP:
- fs->flow_type = UDP_V6_FLOW;
- break;
- default:
- return -EINVAL;
- }
+ unsigned int i;
- ret = bcm_sf2_cfp_unslice_ipv6(priv, v6_spec->ip6src, &v6_spec->psrc,
- false);
- if (ret)
- return ret;
+ for (i = 0; i < sizeof(flow->m_u); i++)
+ flow->m_u.hdata[i] ^= 0xff;
- return bcm_sf2_cfp_unslice_ipv6(priv, v6_m_spec->ip6src,
- &v6_m_spec->psrc, true);
+ flow->m_ext.vlan_etype ^= cpu_to_be16(~0);
+ flow->m_ext.vlan_tci ^= cpu_to_be16(~0);
+ flow->m_ext.data[0] ^= cpu_to_be32(~0);
+ flow->m_ext.data[1] ^= cpu_to_be32(~0);
}
static int bcm_sf2_cfp_rule_get(struct bcm_sf2_priv *priv, int port,
struct ethtool_rxnfc *nfc)
{
- u32 reg, ipv4_or_chain_id;
- unsigned int queue_num;
- int ret;
-
- bcm_sf2_cfp_rule_addr_set(priv, nfc->fs.location);
-
- ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | ACT_POL_RAM);
- if (ret)
- return ret;
-
- reg = core_readl(priv, CORE_ACT_POL_DATA0);
-
- ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | TCAM_SEL);
- if (ret)
- return ret;
-
- /* Extract the destination port */
- nfc->fs.ring_cookie = fls((reg >> DST_MAP_IB_SHIFT) &
- DST_MAP_IB_MASK) - 1;
-
- /* There is no Port 6, so we compensate for that here */
- if (nfc->fs.ring_cookie >= 6)
- nfc->fs.ring_cookie++;
- nfc->fs.ring_cookie *= SF2_NUM_EGRESS_QUEUES;
-
- /* Extract the destination queue */
- queue_num = (reg >> NEW_TC_SHIFT) & NEW_TC_MASK;
- nfc->fs.ring_cookie += queue_num;
-
- /* Extract the L3_FRAMING or CHAIN_ID */
- reg = core_readl(priv, CORE_CFP_DATA_PORT(6));
+ struct cfp_rule *rule;
- /* With IPv6 rules this would contain a non-zero chain ID since
- * we reserve entry 0 and it cannot be used. So if we read 0 here
- * this means an IPv4 rule.
- */
- ipv4_or_chain_id = (reg >> L3_FRAMING_SHIFT) & 0xff;
- if (ipv4_or_chain_id == 0)
- ret = bcm_sf2_cfp_ipv4_rule_get(priv, port, &nfc->fs);
- else
- ret = bcm_sf2_cfp_ipv6_rule_get(priv, port, &nfc->fs,
- ipv4_or_chain_id);
- if (ret)
- return ret;
-
- /* Read last to avoid next entry clobbering the results during search
- * operations
- */
- reg = core_readl(priv, CORE_CFP_DATA_PORT(7));
- if (!(reg & 1 << port))
+ rule = bcm_sf2_cfp_rule_find(priv, port, nfc->fs.location);
+ if (!rule)
return -EINVAL;
+ memcpy(&nfc->fs, &rule->fs, sizeof(rule->fs));
+
bcm_sf2_invert_masks(&nfc->fs);
/* Put the TCAM size here */
@@ -1302,3 +1121,51 @@ int bcm_sf2_cfp_rst(struct bcm_sf2_priv *priv)
return 0;
}
+
+void bcm_sf2_cfp_exit(struct dsa_switch *ds)
+{
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+ struct cfp_rule *rule, *n;
+
+ if (list_empty(&priv->cfp.rules_list))
+ return;
+
+ list_for_each_entry_safe_reverse(rule, n, &priv->cfp.rules_list, next)
+ bcm_sf2_cfp_rule_del(priv, rule->port, rule->fs.location);
+}
+
+int bcm_sf2_cfp_resume(struct dsa_switch *ds)
+{
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+ struct cfp_rule *rule;
+ int ret = 0;
+ u32 reg;
+
+ if (list_empty(&priv->cfp.rules_list))
+ return ret;
+
+ reg = core_readl(priv, CORE_CFP_CTL_REG);
+ reg &= ~CFP_EN_MAP_MASK;
+ core_writel(priv, reg, CORE_CFP_CTL_REG);
+
+ ret = bcm_sf2_cfp_rst(priv);
+ if (ret)
+ return ret;
+
+ list_for_each_entry(rule, &priv->cfp.rules_list, next) {
+ ret = bcm_sf2_cfp_rule_remove(priv, rule->port,
+ rule->fs.location);
+ if (ret) {
+ dev_err(ds->dev, "failed to remove rule\n");
+ return ret;
+ }
+
+ ret = bcm_sf2_cfp_rule_insert(ds, rule->port, &rule->fs);
+ if (ret) {
+ dev_err(ds->dev, "failed to restore rule\n");
+ return ret;
+ }
+ }
+
+ return ret;
+}
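
For context, the entries cached and re-programmed above are ordinary ethtool NFC (ntuple) rules installed from userspace. Below is a rough sketch of that side of the interface, assuming the generic ETHTOOL_SRXCLSRLINS ioctl path; the interface name, addresses, port and queue number are placeholders, error handling is trimmed, and the exact ring_cookie encoding is driver-specific.

/* Userspace sketch: insert one TCPv4 ntuple rule via the ethtool ioctl. */
#include <arpa/inet.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct ethtool_rxnfc nfc;
	struct ifreq ifr;
	int fd, ret;

	memset(&nfc, 0, sizeof(nfc));
	nfc.cmd = ETHTOOL_SRXCLSRLINS;		/* dispatched to the driver's rule_set */
	nfc.fs.flow_type = TCP_V4_FLOW;
	nfc.fs.h_u.tcp_ip4_spec.ip4dst = inet_addr("192.168.1.2");
	nfc.fs.m_u.tcp_ip4_spec.ip4dst = inet_addr("255.255.255.255");
	nfc.fs.h_u.tcp_ip4_spec.pdst = htons(5001);
	nfc.fs.m_u.tcp_ip4_spec.pdst = htons(0xffff);
	nfc.fs.ring_cookie = 2;			/* placeholder destination queue */
	nfc.fs.location = RX_CLS_LOC_ANY;	/* let the driver pick a free slot */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "lan1", IFNAMSIZ - 1);	/* placeholder netdev */
	ifr.ifr_data = (void *)&nfc;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	ret = ioctl(fd, SIOCETHTOOL, &ifr);
	close(fd);
	return ret ? 1 : 0;
}
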