Diffstat (limited to 'drivers/net/macsec.c')
-rw-r--r--	drivers/net/macsec.c	724
1 file changed, 400 insertions(+), 324 deletions(-)
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index bf8ac7a3ded7..5200fd5a10e5 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -19,6 +19,7 @@
#include <net/gro_cells.h>
#include <net/macsec.h>
#include <net/dst_metadata.h>
+#include <net/netdev_lock.h>
#include <linux/phy.h>
#include <linux/byteorder/generic.h>
#include <linux/if_arp.h>
@@ -93,6 +94,8 @@ struct pcpu_secy_stats {
* @secys: linked list of SecY's on the underlying device
* @gro_cells: pointer to the Generic Receive Offload cell
* @offload: status of offloading on the MACsec device
+ * @insert_tx_tag: when offloading, the device requires an additional
+ * tag to be inserted
*/
struct macsec_dev {
struct macsec_secy secy;
@@ -102,6 +105,7 @@ struct macsec_dev {
struct list_head secys;
struct gro_cells gro_cells;
enum macsec_offload offload;
+ bool insert_tx_tag;
};
/**
@@ -151,19 +155,6 @@ static struct macsec_rx_sa *macsec_rxsa_get(struct macsec_rx_sa __rcu *ptr)
return sa;
}
-static struct macsec_rx_sa *macsec_active_rxsa_get(struct macsec_rx_sc *rx_sc)
-{
- struct macsec_rx_sa *sa = NULL;
- int an;
-
- for (an = 0; an < MACSEC_NUM_AN; an++) {
- sa = macsec_rxsa_get(rx_sc->sa[an]);
- if (sa)
- break;
- }
- return sa;
-}
-
static void free_rx_sc_rcu(struct rcu_head *head)
{
struct macsec_rx_sc *rx_sc = container_of(head, struct macsec_rx_sc, rcu_head);
@@ -256,15 +247,39 @@ static sci_t make_sci(const u8 *addr, __be16 port)
return sci;
}
-static sci_t macsec_frame_sci(struct macsec_eth_header *hdr, bool sci_present)
+static sci_t macsec_active_sci(struct macsec_secy *secy)
{
- sci_t sci;
+ struct macsec_rx_sc *rx_sc = rcu_dereference_bh(secy->rx_sc);
- if (sci_present)
+ /* Case single RX SC */
+ if (rx_sc && !rcu_dereference_bh(rx_sc->next))
+ return (rx_sc->active) ? rx_sc->sci : 0;
+ /* Case no RX SC or multiple */
+ else
+ return 0;
+}
+
+static sci_t macsec_frame_sci(struct macsec_eth_header *hdr, bool sci_present,
+ struct macsec_rxh_data *rxd)
+{
+ struct macsec_dev *macsec;
+ sci_t sci = 0;
+
+ /* SC = 1 */
+ if (sci_present) {
memcpy(&sci, hdr->secure_channel_id,
sizeof(hdr->secure_channel_id));
- else
+ /* SC = 0; ES = 0 */
+ } else if ((!(hdr->tci_an & (MACSEC_TCI_ES | MACSEC_TCI_SC))) &&
+ (list_is_singular(&rxd->secys))) {
+ /* Only one SECY should exist in this scenario */
+ macsec = list_first_or_null_rcu(&rxd->secys, struct macsec_dev,
+ secys);
+ if (macsec)
+ return macsec_active_sci(&macsec->secy);
+ } else {
sci = make_sci(hdr->eth.h_source, MACSEC_PORT_ES);
+ }
return sci;
}
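[Editor's note: the SCI handling above follows IEEE 802.1AE, where a Secure Channel Identifier is the 6-byte source MAC address concatenated with a 2-byte port number, treated as one opaque 64-bit value. A minimal standalone sketch of that packing, mirroring what make_sci() does in this file (illustrative only, not part of the patch):

#include <stdint.h>
#include <string.h>

typedef uint64_t sci_t;

/* Pack a 6-byte MAC address and a big-endian port into an SCI. */
static sci_t make_sci_sketch(const uint8_t addr[6], uint16_t port_be)
{
	sci_t sci;

	memcpy(&sci, addr, 6);
	memcpy((uint8_t *)&sci + 6, &port_be, sizeof(port_be));
	return sci;
}
]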
@@ -518,19 +533,13 @@ static void macsec_count_tx(struct sk_buff *skb, struct macsec_tx_sc *tx_sc,
static void count_tx(struct net_device *dev, int ret, int len)
{
- if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
- struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);
-
- u64_stats_update_begin(&stats->syncp);
- u64_stats_inc(&stats->tx_packets);
- u64_stats_add(&stats->tx_bytes, len);
- u64_stats_update_end(&stats->syncp);
- }
+ if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN))
+ dev_sw_netstats_tx_add(dev, 1, len);
}
-static void macsec_encrypt_done(struct crypto_async_request *base, int err)
+static void macsec_encrypt_done(void *data, int err)
{
- struct sk_buff *skb = base->data;
+ struct sk_buff *skb = data;
struct net_device *dev = skb->dev;
struct macsec_dev *macsec = macsec_priv(dev);
struct macsec_tx_sa *sa = macsec_skb_cb(skb)->tx_sa;
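[Editor's note: dev_sw_netstats_tx_add(), which count_tx() now calls, performs the same per-CPU u64_stats sequence the removed open-coded version did. A sketch of its behavior (paraphrased, not part of this patch):

static void dev_sw_netstats_tx_add_sketch(struct net_device *dev,
					  unsigned int packets,
					  unsigned int len)
{
	struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);

	u64_stats_update_begin(&tstats->syncp);
	u64_stats_add(&tstats->tx_bytes, len);
	u64_stats_add(&tstats->tx_packets, packets);
	u64_stats_update_end(&tstats->syncp);
}
]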
@@ -743,7 +752,7 @@ static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u
u64_stats_update_begin(&rxsc_stats->syncp);
rxsc_stats->stats.InPktsLate++;
u64_stats_update_end(&rxsc_stats->syncp);
- secy->netdev->stats.rx_dropped++;
+ DEV_STATS_INC(secy->netdev, rx_dropped);
return false;
}
@@ -767,7 +776,7 @@ static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u
rxsc_stats->stats.InPktsNotValid++;
u64_stats_update_end(&rxsc_stats->syncp);
this_cpu_inc(rx_sa->stats->InPktsNotValid);
- secy->netdev->stats.rx_errors++;
+ DEV_STATS_INC(secy->netdev, rx_errors);
return false;
}
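[Editor's note: the DEV_STATS_INC() conversions in this hunk and below replace plain dev->stats.field++ updates, which can lose increments when two CPUs hit an error path concurrently. The macros (paraphrased from include/linux/netdevice.h) use atomic_long operations instead:

#define DEV_STATS_INC(DEV, FIELD)  atomic_long_inc(&(DEV)->stats.__##FIELD)
#define DEV_STATS_READ(DEV, FIELD) atomic_long_read(&(DEV)->stats.__##FIELD)
]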
@@ -827,17 +836,12 @@ static void macsec_finalize_skb(struct sk_buff *skb, u8 icv_len, u8 hdr_len)
static void count_rx(struct net_device *dev, int len)
{
- struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);
-
- u64_stats_update_begin(&stats->syncp);
- u64_stats_inc(&stats->rx_packets);
- u64_stats_add(&stats->rx_bytes, len);
- u64_stats_update_end(&stats->syncp);
+ dev_sw_netstats_rx_add(dev, len);
}
-static void macsec_decrypt_done(struct crypto_async_request *base, int err)
+static void macsec_decrypt_done(void *data, int err)
{
- struct sk_buff *skb = base->data;
+ struct sk_buff *skb = data;
struct net_device *dev = skb->dev;
struct macsec_dev *macsec = macsec_priv(dev);
struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
@@ -1007,10 +1011,12 @@ static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
struct metadata_dst *md_dst;
struct macsec_rxh_data *rxd;
struct macsec_dev *macsec;
+ bool is_macsec_md_dst;
rcu_read_lock();
rxd = macsec_data_rcu(skb->dev);
md_dst = skb_metadata_dst(skb);
+ is_macsec_md_dst = md_dst && md_dst->type == METADATA_MACSEC;
list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
struct sk_buff *nskb;
@@ -1021,10 +1027,42 @@ static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
* the SecTAG, so we have to deduce which port to deliver to.
*/
if (macsec_is_offloaded(macsec) && netif_running(ndev)) {
- if (md_dst && md_dst->type == METADATA_MACSEC &&
- (!find_rx_sc(&macsec->secy, md_dst->u.macsec_info.sci)))
+ const struct macsec_ops *ops;
+
+ ops = macsec_get_ops(macsec, NULL);
+
+ if (ops->rx_uses_md_dst && !is_macsec_md_dst)
continue;
+ if (is_macsec_md_dst) {
+ struct macsec_rx_sc *rx_sc;
+
+ /* All drivers that implement MACsec offload
+ * support using skb metadata destinations must
+ * indicate that they do so.
+ */
+ DEBUG_NET_WARN_ON_ONCE(!ops->rx_uses_md_dst);
+ rx_sc = find_rx_sc(&macsec->secy,
+ md_dst->u.macsec_info.sci);
+ if (!rx_sc)
+ continue;
+ /* device indicated macsec offload occurred */
+ skb->dev = ndev;
+ skb->pkt_type = PACKET_HOST;
+ eth_skb_pkt_type(skb, ndev);
+ ret = RX_HANDLER_ANOTHER;
+ goto out;
+ }
+
+ /* This datapath is insecure because it is unable to
+ * enforce isolation of broadcast/multicast traffic and
+ * unicast traffic with promiscuous mode on the macsec
+ * netdev. Since the core stack has no mechanism to
+ * check that the hardware did indeed receive MACsec
+ * traffic, it is possible that the response handling
+ * done by the MACsec port was to a plaintext packet.
+ * This violates the MACsec protocol standard.
+ */
if (ether_addr_equal_64bits(hdr->h_dest,
ndev->dev_addr)) {
/* exact match, divert skb to this port */
@@ -1040,14 +1078,16 @@ static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
break;
nskb->dev = ndev;
- if (ether_addr_equal_64bits(hdr->h_dest,
- ndev->broadcast))
- nskb->pkt_type = PACKET_BROADCAST;
- else
- nskb->pkt_type = PACKET_MULTICAST;
+ eth_skb_pkt_type(nskb, ndev);
__netif_rx(nskb);
+ } else if (ndev->flags & IFF_PROMISC) {
+ skb->dev = ndev;
+ skb->pkt_type = PACKET_HOST;
+ ret = RX_HANDLER_ANOTHER;
+ goto out;
}
+
continue;
}
@@ -1059,7 +1099,7 @@ static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
u64_stats_update_begin(&secy_stats->syncp);
secy_stats->stats.InPktsNoTag++;
u64_stats_update_end(&secy_stats->syncp);
- macsec->secy.netdev->stats.rx_dropped++;
+ DEV_STATS_INC(macsec->secy.netdev, rx_dropped);
continue;
}
@@ -1093,7 +1133,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
struct macsec_rxh_data *rxd;
struct macsec_dev *macsec;
unsigned int len;
- sci_t sci;
+ sci_t sci = 0;
u32 hdr_pn;
bool cbit;
struct pcpu_rx_sc_stats *rxsc_stats;
@@ -1140,11 +1180,14 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
macsec_skb_cb(skb)->has_sci = !!(hdr->tci_an & MACSEC_TCI_SC);
macsec_skb_cb(skb)->assoc_num = hdr->tci_an & MACSEC_AN_MASK;
- sci = macsec_frame_sci(hdr, macsec_skb_cb(skb)->has_sci);
rcu_read_lock();
rxd = macsec_data_rcu(skb->dev);
+ sci = macsec_frame_sci(hdr, macsec_skb_cb(skb)->has_sci, rxd);
+ if (!sci)
+ goto drop_nosc;
+
list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
struct macsec_rx_sc *sc = find_rx_sc(&macsec->secy, sci);
@@ -1169,7 +1212,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
u64_stats_update_begin(&secy_stats->syncp);
secy_stats->stats.InPktsBadTag++;
u64_stats_update_end(&secy_stats->syncp);
- secy->netdev->stats.rx_errors++;
+ DEV_STATS_INC(secy->netdev, rx_errors);
goto drop_nosa;
}
@@ -1180,15 +1223,12 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
/* If validateFrames is Strict or the C bit in the
* SecTAG is set, discard
*/
- struct macsec_rx_sa *active_rx_sa = macsec_active_rxsa_get(rx_sc);
if (hdr->tci_an & MACSEC_TCI_C ||
secy->validate_frames == MACSEC_VALIDATE_STRICT) {
u64_stats_update_begin(&rxsc_stats->syncp);
rxsc_stats->stats.InPktsNotUsingSA++;
u64_stats_update_end(&rxsc_stats->syncp);
- secy->netdev->stats.rx_errors++;
- if (active_rx_sa)
- this_cpu_inc(active_rx_sa->stats->InPktsNotUsingSA);
+ DEV_STATS_INC(secy->netdev, rx_errors);
goto drop_nosa;
}
@@ -1198,8 +1238,6 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
u64_stats_update_begin(&rxsc_stats->syncp);
rxsc_stats->stats.InPktsUnusedSA++;
u64_stats_update_end(&rxsc_stats->syncp);
- if (active_rx_sa)
- this_cpu_inc(active_rx_sa->stats->InPktsUnusedSA);
goto deliver;
}
@@ -1220,7 +1258,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
u64_stats_update_begin(&rxsc_stats->syncp);
rxsc_stats->stats.InPktsLate++;
u64_stats_update_end(&rxsc_stats->syncp);
- macsec->secy.netdev->stats.rx_dropped++;
+ DEV_STATS_INC(macsec->secy.netdev, rx_dropped);
goto drop;
}
}
@@ -1261,7 +1299,7 @@ deliver:
if (ret == NET_RX_SUCCESS)
count_rx(dev, len);
else
- macsec->secy.netdev->stats.rx_dropped++;
+ DEV_STATS_INC(macsec->secy.netdev, rx_dropped);
rcu_read_unlock();
@@ -1272,6 +1310,7 @@ drop:
macsec_rxsa_put(rx_sa);
drop_nosa:
macsec_rxsc_put(rx_sc);
+drop_nosc:
rcu_read_unlock();
drop_direct:
kfree_skb(skb);
@@ -1298,7 +1337,7 @@ nosci:
u64_stats_update_begin(&secy_stats->syncp);
secy_stats->stats.InPktsNoSCI++;
u64_stats_update_end(&secy_stats->syncp);
- macsec->secy.netdev->stats.rx_errors++;
+ DEV_STATS_INC(macsec->secy.netdev, rx_errors);
continue;
}
@@ -1317,7 +1356,7 @@ nosci:
secy_stats->stats.InPktsUnknownSCI++;
u64_stats_update_end(&secy_stats->syncp);
} else {
- macsec->secy.netdev->stats.rx_dropped++;
+ DEV_STATS_INC(macsec->secy.netdev, rx_dropped);
}
}
@@ -1331,8 +1370,7 @@ static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len)
struct crypto_aead *tfm;
int ret;
- /* Pick a sync gcm(aes) cipher to ensure order is preserved. */
- tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);
+ tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
if (IS_ERR(tfm))
return tfm;
@@ -1545,9 +1583,6 @@ static struct macsec_tx_sa *get_txsa_from_nl(struct net *net,
if (IS_ERR(dev))
return ERR_CAST(dev);
- if (*assoc_num >= MACSEC_NUM_AN)
- return ERR_PTR(-EINVAL);
-
secy = &macsec_priv(dev)->secy;
tx_sc = &secy->tx_sc;
@@ -1608,8 +1643,6 @@ static struct macsec_rx_sa *get_rxsa_from_nl(struct net *net,
return ERR_PTR(-EINVAL);
*assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
- if (*assoc_num >= MACSEC_NUM_AN)
- return ERR_PTR(-EINVAL);
rx_sc = get_rxsc_from_nl(net, attrs, tb_rxsc, devp, secyp);
if (IS_ERR(rx_sc))
@@ -1632,24 +1665,21 @@ static const struct nla_policy macsec_genl_policy[NUM_MACSEC_ATTR] = {
static const struct nla_policy macsec_genl_rxsc_policy[NUM_MACSEC_RXSC_ATTR] = {
[MACSEC_RXSC_ATTR_SCI] = { .type = NLA_U64 },
- [MACSEC_RXSC_ATTR_ACTIVE] = { .type = NLA_U8 },
+ [MACSEC_RXSC_ATTR_ACTIVE] = NLA_POLICY_MAX(NLA_U8, 1),
};
static const struct nla_policy macsec_genl_sa_policy[NUM_MACSEC_SA_ATTR] = {
- [MACSEC_SA_ATTR_AN] = { .type = NLA_U8 },
- [MACSEC_SA_ATTR_ACTIVE] = { .type = NLA_U8 },
- [MACSEC_SA_ATTR_PN] = NLA_POLICY_MIN_LEN(4),
- [MACSEC_SA_ATTR_KEYID] = { .type = NLA_BINARY,
- .len = MACSEC_KEYID_LEN, },
- [MACSEC_SA_ATTR_KEY] = { .type = NLA_BINARY,
- .len = MACSEC_MAX_KEY_LEN, },
+ [MACSEC_SA_ATTR_AN] = NLA_POLICY_MAX(NLA_U8, MACSEC_NUM_AN - 1),
+ [MACSEC_SA_ATTR_ACTIVE] = NLA_POLICY_MAX(NLA_U8, 1),
+ [MACSEC_SA_ATTR_PN] = NLA_POLICY_MIN(NLA_UINT, 1),
+ [MACSEC_SA_ATTR_KEYID] = NLA_POLICY_EXACT_LEN(MACSEC_KEYID_LEN),
+ [MACSEC_SA_ATTR_KEY] = NLA_POLICY_MAX_LEN(MACSEC_MAX_KEY_LEN),
[MACSEC_SA_ATTR_SSCI] = { .type = NLA_U32 },
- [MACSEC_SA_ATTR_SALT] = { .type = NLA_BINARY,
- .len = MACSEC_SALT_LEN, },
+ [MACSEC_SA_ATTR_SALT] = NLA_POLICY_EXACT_LEN(MACSEC_SALT_LEN),
};
static const struct nla_policy macsec_genl_offload_policy[NUM_MACSEC_OFFLOAD_ATTR] = {
- [MACSEC_OFFLOAD_ATTR_TYPE] = { .type = NLA_U8 },
+ [MACSEC_OFFLOAD_ATTR_TYPE] = NLA_POLICY_MAX(NLA_U8, MACSEC_OFFLOAD_MAX),
};
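[Editor's note: these NLA_POLICY_* conversions move range and length checks out of the command handlers and into the netlink core, which validates attributes during parsing and reports failures via extack before any handler runs. A minimal sketch of the pattern; the EX_ATTR_* names are invented for illustration:

enum {
	EX_ATTR_UNSPEC,
	EX_ATTR_ACTIVE,		/* boolean flag */
	EX_ATTR_KEYID,		/* fixed-size key identifier */
	EX_ATTR_PN,		/* packet number, must be non-zero */
	__EX_ATTR_MAX,
};

static const struct nla_policy ex_policy[__EX_ATTR_MAX] = {
	[EX_ATTR_ACTIVE] = NLA_POLICY_MAX(NLA_U8, 1),
	[EX_ATTR_KEYID]  = NLA_POLICY_EXACT_LEN(16),
	[EX_ATTR_PN]     = NLA_POLICY_MIN(NLA_UINT, 1),
};
]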
/* Offloads an operation to a device driver */
@@ -1701,21 +1731,6 @@ static bool validate_add_rxsa(struct nlattr **attrs)
!attrs[MACSEC_SA_ATTR_KEYID])
return false;
- if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
- return false;
-
- if (attrs[MACSEC_SA_ATTR_PN] &&
- nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
- return false;
-
- if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
- if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
- return false;
- }
-
- if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
- return false;
-
return true;
}
@@ -1774,14 +1789,6 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
rtnl_unlock();
return -EINVAL;
}
-
- if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) {
- pr_notice("macsec: nl: add_rxsa: bad salt length: %d != %d\n",
- nla_len(tb_sa[MACSEC_SA_ATTR_SALT]),
- MACSEC_SALT_LEN);
- rtnl_unlock();
- return -EINVAL;
- }
}
rx_sa = rtnl_dereference(rx_sc->sa[assoc_num]);
@@ -1806,7 +1813,7 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
if (tb_sa[MACSEC_SA_ATTR_PN]) {
spin_lock_bh(&rx_sa->lock);
- rx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
+ rx_sa->next_pn = nla_get_uint(tb_sa[MACSEC_SA_ATTR_PN]);
spin_unlock_bh(&rx_sa->lock);
}
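[Editor's note: the switch from nla_get_u64() to nla_get_uint() pairs with the NLA_POLICY_MIN(NLA_UINT, 1) policy entry above: an NLA_UINT attribute may carry either a 32-bit or a 64-bit payload, and the accessor widens as needed. Its behavior is roughly (paraphrased from include/net/netlink.h):

static inline u64 nla_get_uint_sketch(const struct nlattr *nla)
{
	/* NLA_UINT payloads are either u32- or u64-sized */
	if (nla_len(nla) == sizeof(u32))
		return nla_get_u32(nla);
	return nla_get_u64(nla);
}
]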
@@ -1857,19 +1864,6 @@ cleanup:
return err;
}
-static bool validate_add_rxsc(struct nlattr **attrs)
-{
- if (!attrs[MACSEC_RXSC_ATTR_SCI])
- return false;
-
- if (attrs[MACSEC_RXSC_ATTR_ACTIVE]) {
- if (nla_get_u8(attrs[MACSEC_RXSC_ATTR_ACTIVE]) > 1)
- return false;
- }
-
- return true;
-}
-
static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
{
struct net_device *dev;
@@ -1887,7 +1881,7 @@ static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
if (parse_rxsc_config(attrs, tb_rxsc))
return -EINVAL;
- if (!validate_add_rxsc(tb_rxsc))
+ if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
return -EINVAL;
rtnl_lock();
@@ -1946,20 +1940,6 @@ static bool validate_add_txsa(struct nlattr **attrs)
!attrs[MACSEC_SA_ATTR_KEYID])
return false;
- if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
- return false;
-
- if (nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
- return false;
-
- if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
- if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
- return false;
- }
-
- if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
- return false;
-
return true;
}
@@ -2017,14 +1997,6 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
rtnl_unlock();
return -EINVAL;
}
-
- if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) {
- pr_notice("macsec: nl: add_txsa: bad salt length: %d != %d\n",
- nla_len(tb_sa[MACSEC_SA_ATTR_SALT]),
- MACSEC_SALT_LEN);
- rtnl_unlock();
- return -EINVAL;
- }
}
tx_sa = rtnl_dereference(tx_sc->sa[assoc_num]);
@@ -2048,7 +2020,7 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
}
spin_lock_bh(&tx_sa->lock);
- tx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
+ tx_sa->next_pn = nla_get_uint(tb_sa[MACSEC_SA_ATTR_PN]);
spin_unlock_bh(&tx_sa->lock);
if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
@@ -2301,17 +2273,6 @@ static bool validate_upd_sa(struct nlattr **attrs)
attrs[MACSEC_SA_ATTR_SALT])
return false;
- if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
- return false;
-
- if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
- return false;
-
- if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
- if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
- return false;
- }
-
return true;
}
@@ -2360,7 +2321,7 @@ static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)
spin_lock_bh(&tx_sa->lock);
prev_pn = tx_sa->next_pn_halves;
- tx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
+ tx_sa->next_pn = nla_get_uint(tb_sa[MACSEC_SA_ATTR_PN]);
spin_unlock_bh(&tx_sa->lock);
}
@@ -2385,6 +2346,7 @@ static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)
ctx.sa.assoc_num = assoc_num;
ctx.sa.tx_sa = tx_sa;
+ ctx.sa.update_pn = !!prev_pn.full64;
ctx.secy = secy;
ret = macsec_offload(ops->mdo_upd_txsa, &ctx);
@@ -2457,7 +2419,7 @@ static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
spin_lock_bh(&rx_sa->lock);
prev_pn = rx_sa->next_pn_halves;
- rx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
+ rx_sa->next_pn = nla_get_uint(tb_sa[MACSEC_SA_ATTR_PN]);
spin_unlock_bh(&rx_sa->lock);
}
@@ -2478,6 +2440,7 @@ static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
ctx.sa.assoc_num = assoc_num;
ctx.sa.rx_sa = rx_sa;
+ ctx.sa.update_pn = !!prev_pn.full64;
ctx.secy = secy;
ret = macsec_offload(ops->mdo_upd_rxsa, &ctx);
@@ -2516,7 +2479,7 @@ static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info)
if (parse_rxsc_config(attrs, tb_rxsc))
return -EINVAL;
- if (!validate_add_rxsc(tb_rxsc))
+ if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
return -EINVAL;
rtnl_lock();
@@ -2583,16 +2546,103 @@ static bool macsec_is_configured(struct macsec_dev *macsec)
return false;
}
+static bool macsec_needs_tx_tag(struct macsec_dev *macsec,
+ const struct macsec_ops *ops)
+{
+ return macsec->offload == MACSEC_OFFLOAD_PHY &&
+ ops->mdo_insert_tx_tag;
+}
+
+static void macsec_set_head_tail_room(struct net_device *dev)
+{
+ struct macsec_dev *macsec = macsec_priv(dev);
+ struct net_device *real_dev = macsec->real_dev;
+ int needed_headroom, needed_tailroom;
+ const struct macsec_ops *ops;
+
+ ops = macsec_get_ops(macsec, NULL);
+ if (ops) {
+ needed_headroom = ops->needed_headroom;
+ needed_tailroom = ops->needed_tailroom;
+ } else {
+ needed_headroom = MACSEC_NEEDED_HEADROOM;
+ needed_tailroom = MACSEC_NEEDED_TAILROOM;
+ }
+
+ dev->needed_headroom = real_dev->needed_headroom + needed_headroom;
+ dev->needed_tailroom = real_dev->needed_tailroom + needed_tailroom;
+}
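[Editor's note: for the software (non-offload) defaults, the reserved space works out as follows, assuming the standard SecTAG layout defined earlier in this file:

/*
 * headroom = 2 (MACsec EtherType) + 6 (TCI/AN + SL + PN) + 8 (SCI)
 *          = 16 bytes (MACSEC_NEEDED_HEADROOM)
 * tailroom = 16 bytes for the GCM-AES ICV (MACSEC_NEEDED_TAILROOM)
 *
 * An offloading driver can advertise different requirements via
 * ops->needed_headroom / ops->needed_tailroom.
 */
]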
+
+static void macsec_inherit_tso_max(struct net_device *dev)
+{
+ struct macsec_dev *macsec = macsec_priv(dev);
+
+ /* if macsec is offloaded, we need to follow the lower
+ * device's capabilities. otherwise, we can ignore them.
+ */
+ if (macsec_is_offloaded(macsec))
+ netif_inherit_tso_max(dev, macsec->real_dev);
+}
+
+static int macsec_update_offload(struct net_device *dev, enum macsec_offload offload)
+{
+ enum macsec_offload prev_offload;
+ const struct macsec_ops *ops;
+ struct macsec_context ctx;
+ struct macsec_dev *macsec;
+ int ret = 0;
+
+ macsec = macsec_priv(dev);
+
+ /* Check if the offloading mode is supported by the underlying layers */
+ if (offload != MACSEC_OFFLOAD_OFF &&
+ !macsec_check_offload(offload, macsec))
+ return -EOPNOTSUPP;
+
+ /* Check if the net device is busy. */
+ if (netif_running(dev))
+ return -EBUSY;
+
+ /* Check if the device already has rules configured: we do not support
+ * rules migration.
+ */
+ if (macsec_is_configured(macsec))
+ return -EBUSY;
+
+ prev_offload = macsec->offload;
+
+ ops = __macsec_get_ops(offload == MACSEC_OFFLOAD_OFF ? prev_offload : offload,
+ macsec, &ctx);
+ if (!ops)
+ return -EOPNOTSUPP;
+
+ macsec->offload = offload;
+
+ ctx.secy = &macsec->secy;
+ ret = offload == MACSEC_OFFLOAD_OFF ? macsec_offload(ops->mdo_del_secy, &ctx)
+ : macsec_offload(ops->mdo_add_secy, &ctx);
+ if (ret) {
+ macsec->offload = prev_offload;
+ return ret;
+ }
+
+ macsec_set_head_tail_room(dev);
+ macsec->insert_tx_tag = macsec_needs_tx_tag(macsec, ops);
+
+ macsec_inherit_tso_max(dev);
+
+ netdev_update_features(dev);
+
+ return ret;
+}
+
static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info)
{
struct nlattr *tb_offload[MACSEC_OFFLOAD_ATTR_MAX + 1];
- enum macsec_offload offload, prev_offload;
- int (*func)(struct macsec_context *ctx);
struct nlattr **attrs = info->attrs;
- struct net_device *dev;
- const struct macsec_ops *ops;
- struct macsec_context ctx;
+ enum macsec_offload offload;
struct macsec_dev *macsec;
+ struct net_device *dev;
int ret = 0;
if (!attrs[MACSEC_ATTR_IFINDEX])
@@ -2621,55 +2671,9 @@ static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info)
}
offload = nla_get_u8(tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]);
- if (macsec->offload == offload)
- goto out;
-
- /* Check if the offloading mode is supported by the underlying layers */
- if (offload != MACSEC_OFFLOAD_OFF &&
- !macsec_check_offload(offload, macsec)) {
- ret = -EOPNOTSUPP;
- goto out;
- }
-
- /* Check if the net device is busy. */
- if (netif_running(dev)) {
- ret = -EBUSY;
- goto out;
- }
-
- prev_offload = macsec->offload;
- macsec->offload = offload;
-
- /* Check if the device already has rules configured: we do not support
- * rules migration.
- */
- if (macsec_is_configured(macsec)) {
- ret = -EBUSY;
- goto rollback;
- }
-
- ops = __macsec_get_ops(offload == MACSEC_OFFLOAD_OFF ? prev_offload : offload,
- macsec, &ctx);
- if (!ops) {
- ret = -EOPNOTSUPP;
- goto rollback;
- }
- if (prev_offload == MACSEC_OFFLOAD_OFF)
- func = ops->mdo_add_secy;
- else
- func = ops->mdo_del_secy;
-
- ctx.secy = &macsec->secy;
- ret = macsec_offload(func, &ctx);
- if (ret)
- goto rollback;
-
- rtnl_unlock();
- return 0;
-
-rollback:
- macsec->offload = prev_offload;
+ if (macsec->offload != offload)
+ ret = macsec_update_offload(dev, offload);
out:
rtnl_unlock();
return ret;
@@ -3385,6 +3389,40 @@ static struct genl_family macsec_fam __ro_after_init = {
.resv_start_op = MACSEC_CMD_UPD_OFFLOAD + 1,
};
+static struct sk_buff *macsec_insert_tx_tag(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct macsec_dev *macsec = macsec_priv(dev);
+ const struct macsec_ops *ops;
+ struct phy_device *phydev;
+ struct macsec_context ctx;
+ int skb_final_len;
+ int err;
+
+ ops = macsec_get_ops(macsec, &ctx);
+ skb_final_len = skb->len - ETH_HLEN + ops->needed_headroom +
+ ops->needed_tailroom;
+ if (unlikely(skb_final_len > macsec->real_dev->mtu)) {
+ err = -EINVAL;
+ goto cleanup;
+ }
+
+ phydev = macsec->real_dev->phydev;
+
+ err = skb_ensure_writable_head_tail(skb, dev);
+ if (unlikely(err < 0))
+ goto cleanup;
+
+ err = ops->mdo_insert_tx_tag(phydev, skb);
+ if (unlikely(err))
+ goto cleanup;
+
+ return skb;
+cleanup:
+ kfree_skb(skb);
+ return ERR_PTR(err);
+}
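[Editor's note: a worked example of the skb_final_len check above, with illustrative numbers — assume a PHY driver advertising needed_headroom = 8 for its TX tag and needed_tailroom = 0:

/*
 * skb->len      = 1514 (1500-byte payload + 14-byte Ethernet header)
 * skb_final_len = 1514 - ETH_HLEN + 8 + 0 = 1508
 *
 * 1508 exceeds a default real_dev->mtu of 1500, so the frame is
 * dropped; the macsec device's MTU must be reduced to make room
 * for the tag.
 */
]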
+
static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
@@ -3399,6 +3437,15 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
skb_dst_drop(skb);
dst_hold(&md_dst->dst);
skb_dst_set(skb, &md_dst->dst);
+
+ if (macsec->insert_tx_tag) {
+ skb = macsec_insert_tx_tag(skb, dev);
+ if (IS_ERR(skb)) {
+ DEV_STATS_INC(dev, tx_dropped);
+ return NETDEV_TX_OK;
+ }
+ }
+
skb->dev = macsec->real_dev;
return dev_queue_xmit(skb);
}
@@ -3418,7 +3465,7 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
if (!secy->operational) {
kfree_skb(skb);
- dev->stats.tx_dropped++;
+ DEV_STATS_INC(dev, tx_dropped);
return NETDEV_TX_OK;
}
@@ -3426,7 +3473,7 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
skb = macsec_encrypt(skb, dev);
if (IS_ERR(skb)) {
if (PTR_ERR(skb) != -EINPROGRESS)
- dev->stats.tx_dropped++;
+ DEV_STATS_INC(dev, tx_dropped);
return NETDEV_TX_OK;
}
@@ -3441,29 +3488,31 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
#define MACSEC_FEATURES \
(NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST)
+#define MACSEC_OFFLOAD_FEATURES \
+ (MACSEC_FEATURES | NETIF_F_GSO_SOFTWARE | NETIF_F_SOFT_FEATURES | \
+ NETIF_F_LRO | NETIF_F_RXHASH | NETIF_F_CSUM_MASK | NETIF_F_RXCSUM)
+
static int macsec_dev_init(struct net_device *dev)
{
struct macsec_dev *macsec = macsec_priv(dev);
struct net_device *real_dev = macsec->real_dev;
int err;
- dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
- if (!dev->tstats)
- return -ENOMEM;
-
err = gro_cells_init(&macsec->gro_cells, dev);
- if (err) {
- free_percpu(dev->tstats);
+ if (err)
return err;
- }
- dev->features = real_dev->features & MACSEC_FEATURES;
- dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE;
+ macsec_inherit_tso_max(dev);
+
+ dev->hw_features = real_dev->hw_features & MACSEC_OFFLOAD_FEATURES;
+ dev->hw_features |= NETIF_F_GSO_SOFTWARE;
- dev->needed_headroom = real_dev->needed_headroom +
- MACSEC_NEEDED_HEADROOM;
- dev->needed_tailroom = real_dev->needed_tailroom +
- MACSEC_NEEDED_TAILROOM;
+ dev->features = real_dev->features & MACSEC_OFFLOAD_FEATURES;
+ dev->features |= NETIF_F_GSO_SOFTWARE;
+ dev->lltx = true;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
+
+ macsec_set_head_tail_room(dev);
if (is_zero_ether_addr(dev->dev_addr))
eth_hw_addr_inherit(dev, real_dev);
@@ -3481,7 +3530,6 @@ static void macsec_dev_uninit(struct net_device *dev)
struct macsec_dev *macsec = macsec_priv(dev);
gro_cells_destroy(&macsec->gro_cells);
- free_percpu(dev->tstats);
}
static netdev_features_t macsec_fix_features(struct net_device *dev,
@@ -3489,10 +3537,13 @@ static netdev_features_t macsec_fix_features(struct net_device *dev,
{
struct macsec_dev *macsec = macsec_priv(dev);
struct net_device *real_dev = macsec->real_dev;
+ netdev_features_t mask;
+
+ mask = macsec_is_offloaded(macsec) ? MACSEC_OFFLOAD_FEATURES
+ : MACSEC_FEATURES;
- features &= (real_dev->features & MACSEC_FEATURES) |
+ features &= (real_dev->features & mask) |
NETIF_F_GSO_SOFTWARE | NETIF_F_SOFT_FEATURES;
- features |= NETIF_F_LLTX;
return features;
}
@@ -3610,21 +3661,19 @@ static int macsec_set_mac_address(struct net_device *dev, void *p)
struct macsec_dev *macsec = macsec_priv(dev);
struct net_device *real_dev = macsec->real_dev;
struct sockaddr *addr = p;
+ u8 old_addr[ETH_ALEN];
int err;
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- if (!(dev->flags & IFF_UP))
- goto out;
-
- err = dev_uc_add(real_dev, addr->sa_data);
- if (err < 0)
- return err;
-
- dev_uc_del(real_dev, dev->dev_addr);
+ if (dev->flags & IFF_UP) {
+ err = dev_uc_add(real_dev, addr->sa_data);
+ if (err < 0)
+ return err;
+ }
-out:
+ ether_addr_copy(old_addr, dev->dev_addr);
eth_hw_addr_set(dev, addr->sa_data);
/* If h/w offloading is available, propagate to the device */
@@ -3633,13 +3682,29 @@ out:
struct macsec_context ctx;
ops = macsec_get_ops(macsec, &ctx);
- if (ops) {
- ctx.secy = &macsec->secy;
- macsec_offload(ops->mdo_upd_secy, &ctx);
+ if (!ops) {
+ err = -EOPNOTSUPP;
+ goto restore_old_addr;
}
+
+ ctx.secy = &macsec->secy;
+ err = macsec_offload(ops->mdo_upd_secy, &ctx);
+ if (err)
+ goto restore_old_addr;
}
+ if (dev->flags & IFF_UP)
+ dev_uc_del(real_dev, old_addr);
+
return 0;
+
+restore_old_addr:
+ if (dev->flags & IFF_UP)
+ dev_uc_del(real_dev, addr->sa_data);
+
+ eth_hw_addr_set(dev, old_addr);
+
+ return err;
}
static int macsec_change_mtu(struct net_device *dev, int new_mtu)
@@ -3650,7 +3715,7 @@ static int macsec_change_mtu(struct net_device *dev, int new_mtu)
if (macsec->real_dev->mtu - extra < new_mtu)
return -ERANGE;
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
@@ -3663,14 +3728,14 @@ static void macsec_get_stats64(struct net_device *dev,
dev_fetch_sw_netstats(s, dev->tstats);
- s->rx_dropped = dev->stats.rx_dropped;
- s->tx_dropped = dev->stats.tx_dropped;
- s->rx_errors = dev->stats.rx_errors;
+ s->rx_dropped = DEV_STATS_READ(dev, rx_dropped);
+ s->tx_dropped = DEV_STATS_READ(dev, tx_dropped);
+ s->rx_errors = DEV_STATS_READ(dev, rx_errors);
}
static int macsec_get_iflink(const struct net_device *dev)
{
- return macsec_priv(dev)->real_dev->ifindex;
+ return READ_ONCE(macsec_priv(dev)->real_dev->ifindex);
}
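[Editor's note: the WRITE_ONCE()/READ_ONCE() annotations above come in pairs: dev->mtu and real_dev->ifindex may be read by lockless paths while an RTNL-protected writer updates them. A minimal sketch of the idiom (assumed pattern, not taken from this patch):

static void example_set_mtu(struct net_device *dev, int new_mtu)
{
	WRITE_ONCE(dev->mtu, new_mtu);	/* writer, under RTNL */
}

static unsigned int example_read_mtu(const struct net_device *dev)
{
	return READ_ONCE(dev->mtu);	/* lockless reader, untorn load */
}
]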
static const struct net_device_ops macsec_netdev_ops = {
@@ -3692,29 +3757,30 @@ static const struct device_type macsec_type = {
.name = "macsec",
};
+static int validate_cipher_suite(const struct nlattr *attr,
+ struct netlink_ext_ack *extack);
static const struct nla_policy macsec_rtnl_policy[IFLA_MACSEC_MAX + 1] = {
[IFLA_MACSEC_SCI] = { .type = NLA_U64 },
[IFLA_MACSEC_PORT] = { .type = NLA_U16 },
- [IFLA_MACSEC_ICV_LEN] = { .type = NLA_U8 },
- [IFLA_MACSEC_CIPHER_SUITE] = { .type = NLA_U64 },
+ [IFLA_MACSEC_ICV_LEN] = NLA_POLICY_RANGE(NLA_U8, MACSEC_MIN_ICV_LEN, MACSEC_STD_ICV_LEN),
+ [IFLA_MACSEC_CIPHER_SUITE] = NLA_POLICY_VALIDATE_FN(NLA_U64, validate_cipher_suite),
[IFLA_MACSEC_WINDOW] = { .type = NLA_U32 },
- [IFLA_MACSEC_ENCODING_SA] = { .type = NLA_U8 },
- [IFLA_MACSEC_ENCRYPT] = { .type = NLA_U8 },
- [IFLA_MACSEC_PROTECT] = { .type = NLA_U8 },
- [IFLA_MACSEC_INC_SCI] = { .type = NLA_U8 },
- [IFLA_MACSEC_ES] = { .type = NLA_U8 },
- [IFLA_MACSEC_SCB] = { .type = NLA_U8 },
- [IFLA_MACSEC_REPLAY_PROTECT] = { .type = NLA_U8 },
- [IFLA_MACSEC_VALIDATION] = { .type = NLA_U8 },
- [IFLA_MACSEC_OFFLOAD] = { .type = NLA_U8 },
+ [IFLA_MACSEC_ENCODING_SA] = NLA_POLICY_MAX(NLA_U8, MACSEC_NUM_AN - 1),
+ [IFLA_MACSEC_ENCRYPT] = NLA_POLICY_MAX(NLA_U8, 1),
+ [IFLA_MACSEC_PROTECT] = NLA_POLICY_MAX(NLA_U8, 1),
+ [IFLA_MACSEC_INC_SCI] = NLA_POLICY_MAX(NLA_U8, 1),
+ [IFLA_MACSEC_ES] = NLA_POLICY_MAX(NLA_U8, 1),
+ [IFLA_MACSEC_SCB] = NLA_POLICY_MAX(NLA_U8, 1),
+ [IFLA_MACSEC_REPLAY_PROTECT] = NLA_POLICY_MAX(NLA_U8, 1),
+ [IFLA_MACSEC_VALIDATION] = NLA_POLICY_MAX(NLA_U8, MACSEC_VALIDATE_MAX),
+ [IFLA_MACSEC_OFFLOAD] = NLA_POLICY_MAX(NLA_U8, MACSEC_OFFLOAD_MAX),
};
static void macsec_free_netdev(struct net_device *dev)
{
struct macsec_dev *macsec = macsec_priv(dev);
- if (macsec->secy.tx_sc.md_dst)
- metadata_dst_free(macsec->secy.tx_sc.md_dst);
+ dst_release(&macsec->secy.tx_sc.md_dst->dst);
free_percpu(macsec->stats);
free_percpu(macsec->secy.tx_sc.stats);
@@ -3727,7 +3793,7 @@ static void macsec_setup(struct net_device *dev)
ether_setup(dev);
dev->min_mtu = 0;
dev->max_mtu = ETH_MAX_MTU;
- dev->priv_flags |= IFF_NO_QUEUE;
+ dev->priv_flags |= IFF_NO_QUEUE | IFF_UNICAST_FLT;
dev->netdev_ops = &macsec_netdev_ops;
dev->needs_free_netdev = true;
dev->priv_destructor = macsec_free_netdev;
@@ -3817,6 +3883,8 @@ static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
struct netlink_ext_ack *extack)
{
struct macsec_dev *macsec = macsec_priv(dev);
+ bool macsec_offload_state_change = false;
+ enum macsec_offload offload;
struct macsec_tx_sc tx_sc;
struct macsec_secy secy;
int ret;
@@ -3840,8 +3908,18 @@ static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
if (ret)
goto cleanup;
+ if (data[IFLA_MACSEC_OFFLOAD]) {
+ offload = nla_get_u8(data[IFLA_MACSEC_OFFLOAD]);
+ if (macsec->offload != offload) {
+ macsec_offload_state_change = true;
+ ret = macsec_update_offload(dev, offload);
+ if (ret)
+ goto cleanup;
+ }
+ }
+
/* If h/w offloading is available, propagate to the device */
- if (macsec_is_offloaded(macsec)) {
+ if (!macsec_offload_state_change && macsec_is_offloaded(macsec)) {
const struct macsec_ops *ops;
struct macsec_context ctx;
@@ -3981,17 +4059,15 @@ static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len)
return -ENOMEM;
secy->tx_sc.stats = netdev_alloc_pcpu_stats(struct pcpu_tx_sc_stats);
- if (!secy->tx_sc.stats) {
- free_percpu(macsec->stats);
+ if (!secy->tx_sc.stats)
return -ENOMEM;
- }
secy->tx_sc.md_dst = metadata_dst_alloc(0, METADATA_MACSEC, GFP_KERNEL);
- if (!secy->tx_sc.md_dst) {
- free_percpu(secy->tx_sc.stats);
- free_percpu(macsec->stats);
+ if (!secy->tx_sc.md_dst)
+ /* macsec and secy percpu stats will be freed when unregistering
+ * net_device in macsec_free_netdev()
+ */
return -ENOMEM;
- }
if (sci == MACSEC_UNDEF_SCI)
sci = dev_to_sci(dev, MACSEC_PORT_ES);
@@ -4019,11 +4095,14 @@ static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len)
static struct lock_class_key macsec_netdev_addr_lock_key;
-static int macsec_newlink(struct net *net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
+static int macsec_newlink(struct net_device *dev,
+ struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
+ struct net *link_net = rtnl_newlink_link_net(params);
struct macsec_dev *macsec = macsec_priv(dev);
+ struct nlattr **data = params->data;
+ struct nlattr **tb = params->tb;
rx_handler_func_t *rx_handler;
u8 icv_len = MACSEC_DEFAULT_ICV_LEN;
struct net_device *real_dev;
@@ -4032,7 +4111,7 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
if (!tb[IFLA_LINK])
return -EINVAL;
- real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK]));
+ real_dev = __dev_get_by_index(link_net, nla_get_u32(tb[IFLA_LINK]));
if (!real_dev)
return -ENODEV;
if (real_dev->type != ARPHRD_ETHER)
@@ -4122,6 +4201,9 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
err = macsec_offload(ops->mdo_add_secy, &ctx);
if (err)
goto del_dev;
+
+ macsec->insert_tx_tag =
+ macsec_needs_tx_tag(macsec, ops);
}
}
@@ -4129,6 +4211,7 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
if (err < 0)
goto del_dev;
+ netdev_update_features(dev);
netif_stacked_transfer_operstate(real_dev, dev);
linkwatch_fire_event(dev);
@@ -4145,20 +4228,30 @@ unregister:
return err;
}
+static int validate_cipher_suite(const struct nlattr *attr,
+ struct netlink_ext_ack *extack)
+{
+ switch (nla_get_u64(attr)) {
+ case MACSEC_CIPHER_ID_GCM_AES_128:
+ case MACSEC_CIPHER_ID_GCM_AES_256:
+ case MACSEC_CIPHER_ID_GCM_AES_XPN_128:
+ case MACSEC_CIPHER_ID_GCM_AES_XPN_256:
+ case MACSEC_DEFAULT_CIPHER_ID:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
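[Editor's note: validate_cipher_suite() is hooked into the policy table via NLA_POLICY_VALIDATE_FN, so it runs during attribute parsing rather than in the link handlers. Such validators can also attach an extack message to the offending attribute; a hypothetical sketch:

static int ex_validate(const struct nlattr *attr,
		       struct netlink_ext_ack *extack)
{
	if (nla_get_u64(attr) > 42) {
		NL_SET_ERR_MSG_ATTR(extack, attr, "unsupported value");
		return -EINVAL;
	}
	return 0;
}
]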
static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
- u64 csid = MACSEC_DEFAULT_CIPHER_ID;
u8 icv_len = MACSEC_DEFAULT_ICV_LEN;
- int flag;
bool es, scb, sci;
if (!data)
return 0;
- if (data[IFLA_MACSEC_CIPHER_SUITE])
- csid = nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE]);
-
if (data[IFLA_MACSEC_ICV_LEN]) {
icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
if (icv_len != MACSEC_DEFAULT_ICV_LEN) {
@@ -4174,45 +4267,13 @@ static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[],
}
}
- switch (csid) {
- case MACSEC_CIPHER_ID_GCM_AES_128:
- case MACSEC_CIPHER_ID_GCM_AES_256:
- case MACSEC_CIPHER_ID_GCM_AES_XPN_128:
- case MACSEC_CIPHER_ID_GCM_AES_XPN_256:
- case MACSEC_DEFAULT_CIPHER_ID:
- if (icv_len < MACSEC_MIN_ICV_LEN ||
- icv_len > MACSEC_STD_ICV_LEN)
- return -EINVAL;
- break;
- default:
- return -EINVAL;
- }
-
- if (data[IFLA_MACSEC_ENCODING_SA]) {
- if (nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]) >= MACSEC_NUM_AN)
- return -EINVAL;
- }
-
- for (flag = IFLA_MACSEC_ENCODING_SA + 1;
- flag < IFLA_MACSEC_VALIDATION;
- flag++) {
- if (data[flag]) {
- if (nla_get_u8(data[flag]) > 1)
- return -EINVAL;
- }
- }
-
- es = data[IFLA_MACSEC_ES] ? nla_get_u8(data[IFLA_MACSEC_ES]) : false;
- sci = data[IFLA_MACSEC_INC_SCI] ? nla_get_u8(data[IFLA_MACSEC_INC_SCI]) : false;
- scb = data[IFLA_MACSEC_SCB] ? nla_get_u8(data[IFLA_MACSEC_SCB]) : false;
+ es = nla_get_u8_default(data[IFLA_MACSEC_ES], false);
+ sci = nla_get_u8_default(data[IFLA_MACSEC_INC_SCI], false);
+ scb = nla_get_u8_default(data[IFLA_MACSEC_SCB], false);
if ((sci && (scb || es)) || (scb && es))
return -EINVAL;
- if (data[IFLA_MACSEC_VALIDATION] &&
- nla_get_u8(data[IFLA_MACSEC_VALIDATION]) > MACSEC_VALIDATE_MAX)
- return -EINVAL;
-
if ((data[IFLA_MACSEC_REPLAY_PROTECT] &&
nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT])) &&
!data[IFLA_MACSEC_WINDOW])
@@ -4226,6 +4287,18 @@ static struct net *macsec_get_link_net(const struct net_device *dev)
return dev_net(macsec_priv(dev)->real_dev);
}
+struct net_device *macsec_get_real_dev(const struct net_device *dev)
+{
+ return macsec_priv(dev)->real_dev;
+}
+EXPORT_SYMBOL_GPL(macsec_get_real_dev);
+
+bool macsec_netdev_is_offloaded(struct net_device *dev)
+{
+ return macsec_is_offloaded(macsec_priv(dev));
+}
+EXPORT_SYMBOL_GPL(macsec_netdev_is_offloaded);
+
static size_t macsec_get_size(const struct net_device *dev)
{
return nla_total_size_64bit(8) + /* IFLA_MACSEC_SCI */
@@ -4240,16 +4313,22 @@ static size_t macsec_get_size(const struct net_device *dev)
nla_total_size(1) + /* IFLA_MACSEC_SCB */
nla_total_size(1) + /* IFLA_MACSEC_REPLAY_PROTECT */
nla_total_size(1) + /* IFLA_MACSEC_VALIDATION */
+ nla_total_size(1) + /* IFLA_MACSEC_OFFLOAD */
0;
}
static int macsec_fill_info(struct sk_buff *skb,
const struct net_device *dev)
{
- struct macsec_secy *secy = &macsec_priv(dev)->secy;
- struct macsec_tx_sc *tx_sc = &secy->tx_sc;
+ struct macsec_tx_sc *tx_sc;
+ struct macsec_dev *macsec;
+ struct macsec_secy *secy;
u64 csid;
+ macsec = macsec_priv(dev);
+ secy = &macsec->secy;
+ tx_sc = &secy->tx_sc;
+
switch (secy->key_len) {
case MACSEC_GCM_AES_128_SAK_LEN:
csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_128 : MACSEC_DEFAULT_CIPHER_ID;
@@ -4274,6 +4353,7 @@ static int macsec_fill_info(struct sk_buff *skb,
nla_put_u8(skb, IFLA_MACSEC_SCB, tx_sc->scb) ||
nla_put_u8(skb, IFLA_MACSEC_REPLAY_PROTECT, secy->replay_protect) ||
nla_put_u8(skb, IFLA_MACSEC_VALIDATION, secy->validate_frames) ||
+ nla_put_u8(skb, IFLA_MACSEC_OFFLOAD, macsec->offload) ||
0)
goto nla_put_failure;
@@ -4312,31 +4392,26 @@ static int macsec_notify(struct notifier_block *this, unsigned long event,
void *ptr)
{
struct net_device *real_dev = netdev_notifier_info_to_dev(ptr);
+ struct macsec_rxh_data *rxd;
+ struct macsec_dev *m, *n;
LIST_HEAD(head);
if (!is_macsec_master(real_dev))
return NOTIFY_DONE;
+ rxd = macsec_data_rtnl(real_dev);
+
switch (event) {
case NETDEV_DOWN:
case NETDEV_UP:
- case NETDEV_CHANGE: {
- struct macsec_dev *m, *n;
- struct macsec_rxh_data *rxd;
-
- rxd = macsec_data_rtnl(real_dev);
+ case NETDEV_CHANGE:
list_for_each_entry_safe(m, n, &rxd->secys, secys) {
struct net_device *dev = m->secy.netdev;
netif_stacked_transfer_operstate(real_dev, dev);
}
break;
- }
- case NETDEV_UNREGISTER: {
- struct macsec_dev *m, *n;
- struct macsec_rxh_data *rxd;
-
- rxd = macsec_data_rtnl(real_dev);
+ case NETDEV_UNREGISTER:
list_for_each_entry_safe(m, n, &rxd->secys, secys) {
macsec_common_dellink(m->secy.netdev, &head);
}
@@ -4346,12 +4421,7 @@ static int macsec_notify(struct notifier_block *this, unsigned long event,
unregister_netdevice_many(&head);
break;
- }
- case NETDEV_CHANGEMTU: {
- struct macsec_dev *m;
- struct macsec_rxh_data *rxd;
-
- rxd = macsec_data_rtnl(real_dev);
+ case NETDEV_CHANGEMTU:
list_for_each_entry(m, &rxd->secys, secys) {
struct net_device *dev = m->secy.netdev;
unsigned int mtu = real_dev->mtu - (m->secy.icv_len +
@@ -4360,7 +4430,13 @@ static int macsec_notify(struct notifier_block *this, unsigned long event,
if (dev->mtu > mtu)
dev_set_mtu(dev, mtu);
}
- }
+ break;
+ case NETDEV_FEAT_CHANGE:
+ list_for_each_entry(m, &rxd->secys, secys) {
+ macsec_inherit_tso_max(m->secy.netdev);
+ netdev_update_features(m->secy.netdev);
+ }
+ break;
}
return NOTIFY_OK;