Diffstat (limited to 'drivers/net/macsec.c')
-rw-r--r--  drivers/net/macsec.c | 2220
1 file changed, 1562 insertions(+), 658 deletions(-)
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index 64a982563d59..5200fd5a10e5 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -1,12 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * drivers/net/macsec.c - MACsec device * * Copyright (c) 2015 Sabrina Dubroca <sd@queasysnail.net> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. */ #include <linux/types.h> @@ -15,18 +11,21 @@ #include <linux/module.h> #include <crypto/aead.h> #include <linux/etherdevice.h> +#include <linux/netdevice.h> #include <linux/rtnetlink.h> #include <linux/refcount.h> #include <net/genetlink.h> #include <net/sock.h> #include <net/gro_cells.h> +#include <net/macsec.h> +#include <net/dst_metadata.h> +#include <net/netdev_lock.h> +#include <linux/phy.h> +#include <linux/byteorder/generic.h> +#include <linux/if_arp.h> #include <uapi/linux/if_macsec.h> -typedef u64 __bitwise sci_t; - -#define MACSEC_SCI_LEN 8 - /* SecTAG length = macsec_eth_header without the optional SCI */ #define MACSEC_TAG_LEN 6 @@ -47,22 +46,10 @@ struct macsec_eth_header { u8 secure_channel_id[8]; /* optional */ } __packed; -#define MACSEC_TCI_VERSION 0x80 -#define MACSEC_TCI_ES 0x40 /* end station */ -#define MACSEC_TCI_SC 0x20 /* SCI present */ -#define MACSEC_TCI_SCB 0x10 /* epon */ -#define MACSEC_TCI_E 0x08 /* encryption */ -#define MACSEC_TCI_C 0x04 /* changed text */ -#define MACSEC_AN_MASK 0x03 /* association number */ -#define MACSEC_TCI_CONFID (MACSEC_TCI_E | MACSEC_TCI_C) - /* minimum secure data length deemed "not short", see IEEE 802.1AE-2006 9.7 */ #define MIN_NON_SHORT_LEN 48 #define GCM_AES_IV_LEN 12 -#define DEFAULT_ICV_LEN 16 - -#define MACSEC_NUM_AN 4 /* 2 bits for the association number */ #define for_each_rxsc(secy, sc) \ for (sc = rcu_dereference_bh(secy->rx_sc); \ @@ -73,6 +60,16 @@ struct macsec_eth_header { sc; \ sc = rtnl_dereference(sc->next)) +#define pn_same_half(pn1, pn2) (!(((pn1) >> 31) ^ ((pn2) >> 31))) + +struct gcm_iv_xpn { + union { + u8 short_secure_channel_id[4]; + ssci_t ssci; + }; + __be64 pn; +} __packed; + struct gcm_iv { union { u8 secure_channel_id[8]; @@ -81,178 +78,8 @@ struct gcm_iv { __be32 pn; }; -/** - * struct macsec_key - SA key - * @id: user-provided key identifier - * @tfm: crypto struct, key storage - */ -struct macsec_key { - u8 id[MACSEC_KEYID_LEN]; - struct crypto_aead *tfm; -}; - -struct macsec_rx_sc_stats { - __u64 InOctetsValidated; - __u64 InOctetsDecrypted; - __u64 InPktsUnchecked; - __u64 InPktsDelayed; - __u64 InPktsOK; - __u64 InPktsInvalid; - __u64 InPktsLate; - __u64 InPktsNotValid; - __u64 InPktsNotUsingSA; - __u64 InPktsUnusedSA; -}; - -struct macsec_rx_sa_stats { - __u32 InPktsOK; - __u32 InPktsInvalid; - __u32 InPktsNotValid; - __u32 InPktsNotUsingSA; - __u32 InPktsUnusedSA; -}; - -struct macsec_tx_sa_stats { - __u32 OutPktsProtected; - __u32 OutPktsEncrypted; -}; - -struct macsec_tx_sc_stats { - __u64 OutPktsProtected; - __u64 OutPktsEncrypted; - __u64 OutOctetsProtected; - __u64 OutOctetsEncrypted; -}; - -struct macsec_dev_stats { - __u64 OutPktsUntagged; - __u64 InPktsUntagged; - __u64 OutPktsTooLong; - __u64 InPktsNoTag; - __u64 InPktsBadTag; - __u64 InPktsUnknownSCI; - __u64 InPktsNoSCI; - __u64 InPktsOverrun; -}; - -/** - * struct macsec_rx_sa - receive secure association - * @active: - * @next_pn: packet number expected for the 
next packet - * @lock: protects next_pn manipulations - * @key: key structure - * @stats: per-SA stats - */ -struct macsec_rx_sa { - struct macsec_key key; - spinlock_t lock; - u32 next_pn; - refcount_t refcnt; - bool active; - struct macsec_rx_sa_stats __percpu *stats; - struct macsec_rx_sc *sc; - struct rcu_head rcu; -}; - -struct pcpu_rx_sc_stats { - struct macsec_rx_sc_stats stats; - struct u64_stats_sync syncp; -}; - -/** - * struct macsec_rx_sc - receive secure channel - * @sci: secure channel identifier for this SC - * @active: channel is active - * @sa: array of secure associations - * @stats: per-SC stats - */ -struct macsec_rx_sc { - struct macsec_rx_sc __rcu *next; - sci_t sci; - bool active; - struct macsec_rx_sa __rcu *sa[MACSEC_NUM_AN]; - struct pcpu_rx_sc_stats __percpu *stats; - refcount_t refcnt; - struct rcu_head rcu_head; -}; - -/** - * struct macsec_tx_sa - transmit secure association - * @active: - * @next_pn: packet number to use for the next packet - * @lock: protects next_pn manipulations - * @key: key structure - * @stats: per-SA stats - */ -struct macsec_tx_sa { - struct macsec_key key; - spinlock_t lock; - u32 next_pn; - refcount_t refcnt; - bool active; - struct macsec_tx_sa_stats __percpu *stats; - struct rcu_head rcu; -}; - -struct pcpu_tx_sc_stats { - struct macsec_tx_sc_stats stats; - struct u64_stats_sync syncp; -}; - -/** - * struct macsec_tx_sc - transmit secure channel - * @active: - * @encoding_sa: association number of the SA currently in use - * @encrypt: encrypt packets on transmit, or authenticate only - * @send_sci: always include the SCI in the SecTAG - * @end_station: - * @scb: single copy broadcast flag - * @sa: array of secure associations - * @stats: stats for this TXSC - */ -struct macsec_tx_sc { - bool active; - u8 encoding_sa; - bool encrypt; - bool send_sci; - bool end_station; - bool scb; - struct macsec_tx_sa __rcu *sa[MACSEC_NUM_AN]; - struct pcpu_tx_sc_stats __percpu *stats; -}; - #define MACSEC_VALIDATE_DEFAULT MACSEC_VALIDATE_STRICT -/** - * struct macsec_secy - MACsec Security Entity - * @netdev: netdevice for this SecY - * @n_rx_sc: number of receive secure channels configured on this SecY - * @sci: secure channel identifier used for tx - * @key_len: length of keys used by the cipher suite - * @icv_len: length of ICV used by the cipher suite - * @validate_frames: validation mode - * @operational: MAC_Operational flag - * @protect_frames: enable protection for this SecY - * @replay_protect: enable packet number checks on receive - * @replay_window: size of the replay window - * @tx_sc: transmit secure channel - * @rx_sc: linked list of receive secure channels - */ -struct macsec_secy { - struct net_device *netdev; - unsigned int n_rx_sc; - sci_t sci; - u16 key_len; - u16 icv_len; - enum macsec_validation_type validate_frames; - bool operational; - bool protect_frames; - bool replay_protect; - u32 replay_window; - struct macsec_tx_sc tx_sc; - struct macsec_rx_sc __rcu *rx_sc; -}; - struct pcpu_secy_stats { struct macsec_dev_stats stats; struct u64_stats_sync syncp; @@ -262,16 +89,23 @@ struct pcpu_secy_stats { * struct macsec_dev - private data * @secy: SecY config * @real_dev: pointer to underlying netdevice + * @dev_tracker: refcount tracker for @real_dev reference * @stats: MACsec device stats * @secys: linked list of SecY's on the underlying device + * @gro_cells: pointer to the Generic Receive Offload cell + * @offload: status of offloading on the MACsec device + * @insert_tx_tag: when offloading, device requires to insert an + 
* additional tag */ struct macsec_dev { struct macsec_secy secy; struct net_device *real_dev; + netdevice_tracker dev_tracker; struct pcpu_secy_stats __percpu *stats; struct list_head secys; struct gro_cells gro_cells; - unsigned int nest_level; + enum macsec_offload offload; + bool insert_tx_tag; }; /** @@ -389,27 +223,21 @@ static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb) return (struct macsec_cb *)skb->cb; } -#define MACSEC_PORT_ES (htons(0x0001)) #define MACSEC_PORT_SCB (0x0000) #define MACSEC_UNDEF_SCI ((__force sci_t)0xffffffffffffffffULL) +#define MACSEC_UNDEF_SSCI ((__force ssci_t)0xffffffff) #define MACSEC_GCM_AES_128_SAK_LEN 16 #define MACSEC_GCM_AES_256_SAK_LEN 32 #define DEFAULT_SAK_LEN MACSEC_GCM_AES_128_SAK_LEN +#define DEFAULT_XPN false #define DEFAULT_SEND_SCI true #define DEFAULT_ENCRYPT false #define DEFAULT_ENCODING_SA 0 +#define MACSEC_XPN_MAX_REPLAY_WINDOW (((1 << 30) - 1)) -static bool send_sci(const struct macsec_secy *secy) -{ - const struct macsec_tx_sc *tx_sc = &secy->tx_sc; - - return tx_sc->send_sci || - (secy->n_rx_sc > 1 && !tx_sc->end_station && !tx_sc->scb); -} - -static sci_t make_sci(u8 *addr, __be16 port) +static sci_t make_sci(const u8 *addr, __be16 port) { sci_t sci; @@ -419,15 +247,39 @@ static sci_t make_sci(u8 *addr, __be16 port) return sci; } -static sci_t macsec_frame_sci(struct macsec_eth_header *hdr, bool sci_present) +static sci_t macsec_active_sci(struct macsec_secy *secy) { - sci_t sci; + struct macsec_rx_sc *rx_sc = rcu_dereference_bh(secy->rx_sc); + + /* Case single RX SC */ + if (rx_sc && !rcu_dereference_bh(rx_sc->next)) + return (rx_sc->active) ? rx_sc->sci : 0; + /* Case no RX SC or multiple */ + else + return 0; +} + +static sci_t macsec_frame_sci(struct macsec_eth_header *hdr, bool sci_present, + struct macsec_rxh_data *rxd) +{ + struct macsec_dev *macsec; + sci_t sci = 0; - if (sci_present) + /* SC = 1 */ + if (sci_present) { memcpy(&sci, hdr->secure_channel_id, sizeof(hdr->secure_channel_id)); - else + /* SC = 0; ES = 0 */ + } else if ((!(hdr->tci_an & (MACSEC_TCI_ES | MACSEC_TCI_SC))) && + (list_is_singular(&rxd->secys))) { + /* Only one SECY should exist on this scenario */ + macsec = list_first_or_null_rcu(&rxd->secys, struct macsec_dev, + secys); + if (macsec) + return macsec_active_sci(&macsec->secy); + } else { sci = make_sci(hdr->eth.h_source, MACSEC_PORT_ES); + } return sci; } @@ -473,7 +325,7 @@ static void macsec_fill_sectag(struct macsec_eth_header *h, /* with GCM, C/E clear for !encrypt, both set for encrypt */ if (tx_sc->encrypt) h->tci_an |= MACSEC_TCI_CONFID; - else if (secy->icv_len != DEFAULT_ICV_LEN) + else if (secy->icv_len != MACSEC_DEFAULT_ICV_LEN) h->tci_an |= MACSEC_TCI_C; h->tci_an |= tx_sc->encoding_sa; @@ -485,8 +337,67 @@ static void macsec_set_shortlen(struct macsec_eth_header *h, size_t data_len) h->short_length = data_len; } -/* validate MACsec packet according to IEEE 802.1AE-2006 9.12 */ -static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len) +/* Checks if a MACsec interface is being offloaded to an hardware engine */ +static bool macsec_is_offloaded(struct macsec_dev *macsec) +{ + if (macsec->offload == MACSEC_OFFLOAD_MAC || + macsec->offload == MACSEC_OFFLOAD_PHY) + return true; + + return false; +} + +/* Checks if underlying layers implement MACsec offloading functions. 
*/ +static bool macsec_check_offload(enum macsec_offload offload, + struct macsec_dev *macsec) +{ + if (!macsec || !macsec->real_dev) + return false; + + if (offload == MACSEC_OFFLOAD_PHY) + return macsec->real_dev->phydev && + macsec->real_dev->phydev->macsec_ops; + else if (offload == MACSEC_OFFLOAD_MAC) + return macsec->real_dev->features & NETIF_F_HW_MACSEC && + macsec->real_dev->macsec_ops; + + return false; +} + +static const struct macsec_ops *__macsec_get_ops(enum macsec_offload offload, + struct macsec_dev *macsec, + struct macsec_context *ctx) +{ + if (ctx) { + memset(ctx, 0, sizeof(*ctx)); + ctx->offload = offload; + + if (offload == MACSEC_OFFLOAD_PHY) + ctx->phydev = macsec->real_dev->phydev; + else if (offload == MACSEC_OFFLOAD_MAC) + ctx->netdev = macsec->real_dev; + } + + if (offload == MACSEC_OFFLOAD_PHY) + return macsec->real_dev->phydev->macsec_ops; + else + return macsec->real_dev->macsec_ops; +} + +/* Returns a pointer to the MACsec ops struct if any and updates the MACsec + * context device reference if provided. + */ +static const struct macsec_ops *macsec_get_ops(struct macsec_dev *macsec, + struct macsec_context *ctx) +{ + if (!macsec_check_offload(macsec->offload, macsec)) + return NULL; + + return __macsec_get_ops(macsec->offload, macsec, ctx); +} + +/* validate MACsec packet according to IEEE 802.1AE-2018 9.12 */ +static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len, bool xpn) { struct macsec_eth_header *h = (struct macsec_eth_header *)skb->data; int len = skb->len - 2 * ETH_ALEN; @@ -511,8 +422,8 @@ static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len) if (h->unused) return false; - /* rx.pn != 0 (figure 10-5) */ - if (!h->packet_number) + /* rx.pn != 0 if not XPN (figure 10-5 with 802.11AEbw-2013 amendment) */ + if (!h->packet_number && !xpn) return false; /* length check, f) g) h) i) */ @@ -524,6 +435,15 @@ static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len) #define MACSEC_NEEDED_HEADROOM (macsec_extra_len(true)) #define MACSEC_NEEDED_TAILROOM MACSEC_STD_ICV_LEN +static void macsec_fill_iv_xpn(unsigned char *iv, ssci_t ssci, u64 pn, + salt_t salt) +{ + struct gcm_iv_xpn *gcm_iv = (struct gcm_iv_xpn *)iv; + + gcm_iv->ssci = ssci ^ salt.ssci; + gcm_iv->pn = cpu_to_be64(pn) ^ salt.pn; +} + static void macsec_fill_iv(unsigned char *iv, sci_t sci, u32 pn) { struct gcm_iv *gcm_iv = (struct gcm_iv *)iv; @@ -537,20 +457,38 @@ static struct macsec_eth_header *macsec_ethhdr(struct sk_buff *skb) return (struct macsec_eth_header *)skb_mac_header(skb); } -static u32 tx_sa_update_pn(struct macsec_tx_sa *tx_sa, struct macsec_secy *secy) +static void __macsec_pn_wrapped(struct macsec_secy *secy, + struct macsec_tx_sa *tx_sa) { - u32 pn; + pr_debug("PN wrapped, transitioning to !oper\n"); + tx_sa->active = false; + if (secy->protect_frames) + secy->operational = false; +} +void macsec_pn_wrapped(struct macsec_secy *secy, struct macsec_tx_sa *tx_sa) +{ spin_lock_bh(&tx_sa->lock); - pn = tx_sa->next_pn; + __macsec_pn_wrapped(secy, tx_sa); + spin_unlock_bh(&tx_sa->lock); +} +EXPORT_SYMBOL_GPL(macsec_pn_wrapped); - tx_sa->next_pn++; - if (tx_sa->next_pn == 0) { - pr_debug("PN wrapped, transitioning to !oper\n"); - tx_sa->active = false; - if (secy->protect_frames) - secy->operational = false; - } +static pn_t tx_sa_update_pn(struct macsec_tx_sa *tx_sa, + struct macsec_secy *secy) +{ + pn_t pn; + + spin_lock_bh(&tx_sa->lock); + + pn = tx_sa->next_pn_halves; + if (secy->xpn) + tx_sa->next_pn++; + else + tx_sa->next_pn_halves.lower++; + + if 
(tx_sa->next_pn == 0) + __macsec_pn_wrapped(secy, tx_sa); spin_unlock_bh(&tx_sa->lock); return pn; @@ -565,18 +503,28 @@ static void macsec_encrypt_finish(struct sk_buff *skb, struct net_device *dev) skb->protocol = eth_hdr(skb)->h_proto; } +static unsigned int macsec_msdu_len(struct sk_buff *skb) +{ + struct macsec_dev *macsec = macsec_priv(skb->dev); + struct macsec_secy *secy = &macsec->secy; + bool sci_present = macsec_skb_cb(skb)->has_sci; + + return skb->len - macsec_hdr_len(sci_present) - secy->icv_len; +} + static void macsec_count_tx(struct sk_buff *skb, struct macsec_tx_sc *tx_sc, struct macsec_tx_sa *tx_sa) { + unsigned int msdu_len = macsec_msdu_len(skb); struct pcpu_tx_sc_stats *txsc_stats = this_cpu_ptr(tx_sc->stats); u64_stats_update_begin(&txsc_stats->syncp); if (tx_sc->encrypt) { - txsc_stats->stats.OutOctetsEncrypted += skb->len; + txsc_stats->stats.OutOctetsEncrypted += msdu_len; txsc_stats->stats.OutPktsEncrypted++; this_cpu_inc(tx_sa->stats->OutPktsEncrypted); } else { - txsc_stats->stats.OutOctetsProtected += skb->len; + txsc_stats->stats.OutOctetsProtected += msdu_len; txsc_stats->stats.OutPktsProtected++; this_cpu_inc(tx_sa->stats->OutPktsProtected); } @@ -585,19 +533,13 @@ static void macsec_count_tx(struct sk_buff *skb, struct macsec_tx_sc *tx_sc, static void count_tx(struct net_device *dev, int ret, int len) { - if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) { - struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats); - - u64_stats_update_begin(&stats->syncp); - stats->tx_packets++; - stats->tx_bytes += len; - u64_stats_update_end(&stats->syncp); - } + if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) + dev_sw_netstats_tx_add(dev, 1, len); } -static void macsec_encrypt_done(struct crypto_async_request *base, int err) +static void macsec_encrypt_done(void *data, int err) { - struct sk_buff *skb = base->data; + struct sk_buff *skb = data; struct net_device *dev = skb->dev; struct macsec_dev *macsec = macsec_priv(dev); struct macsec_tx_sa *sa = macsec_skb_cb(skb)->tx_sa; @@ -606,9 +548,10 @@ static void macsec_encrypt_done(struct crypto_async_request *base, int err) aead_request_free(macsec_skb_cb(skb)->req); rcu_read_lock_bh(); - macsec_encrypt_finish(skb, dev); macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa); - len = skb->len; + /* packet is encrypted/protected so tx_bytes must be calculated */ + len = macsec_msdu_len(skb) + 2 * ETH_ALEN; + macsec_encrypt_finish(skb, dev); ret = dev_queue_xmit(skb); count_tx(dev, ret, len); rcu_read_unlock_bh(); @@ -663,7 +606,7 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb, struct macsec_tx_sa *tx_sa; struct macsec_dev *macsec = macsec_priv(dev); bool sci_present; - u32 pn; + pn_t pn; secy = &macsec->secy; tx_sc = &secy->tx_sc; @@ -700,17 +643,17 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb, unprotected_len = skb->len; eth = eth_hdr(skb); - sci_present = send_sci(secy); + sci_present = macsec_send_sci(secy); hh = skb_push(skb, macsec_extra_len(sci_present)); memmove(hh, eth, 2 * ETH_ALEN); pn = tx_sa_update_pn(tx_sa, secy); - if (pn == 0) { + if (pn.full64 == 0) { macsec_txsa_put(tx_sa); kfree_skb(skb); return ERR_PTR(-ENOLINK); } - macsec_fill_sectag(hh, secy, pn, sci_present); + macsec_fill_sectag(hh, secy, pn.lower, sci_present); macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN); skb_put(skb, secy->icv_len); @@ -741,7 +684,10 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb, return ERR_PTR(-ENOMEM); } - macsec_fill_iv(iv, secy->sci, 
pn); + if (secy->xpn) + macsec_fill_iv_xpn(iv, tx_sa->ssci, pn.full64, tx_sa->key.salt); + else + macsec_fill_iv(iv, secy->sci, pn.lower); sg_init_table(sg, ret); ret = skb_to_sgvec(skb, sg, 0, skb->len); @@ -764,6 +710,7 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb, macsec_skb_cb(skb)->req = req; macsec_skb_cb(skb)->tx_sa = tx_sa; + macsec_skb_cb(skb)->has_sci = sci_present; aead_request_set_callback(req, 0, macsec_encrypt_done, skb); dev_hold(skb->dev); @@ -793,26 +740,29 @@ static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u u32 lowest_pn = 0; spin_lock(&rx_sa->lock); - if (rx_sa->next_pn >= secy->replay_window) - lowest_pn = rx_sa->next_pn - secy->replay_window; + if (rx_sa->next_pn_halves.lower >= secy->replay_window) + lowest_pn = rx_sa->next_pn_halves.lower - secy->replay_window; /* Now perform replay protection check again * (see IEEE 802.1AE-2006 figure 10-5) */ - if (secy->replay_protect && pn < lowest_pn) { + if (secy->replay_protect && pn < lowest_pn && + (!secy->xpn || pn_same_half(pn, lowest_pn))) { spin_unlock(&rx_sa->lock); u64_stats_update_begin(&rxsc_stats->syncp); rxsc_stats->stats.InPktsLate++; u64_stats_update_end(&rxsc_stats->syncp); + DEV_STATS_INC(secy->netdev, rx_dropped); return false; } if (secy->validate_frames != MACSEC_VALIDATE_DISABLED) { + unsigned int msdu_len = macsec_msdu_len(skb); u64_stats_update_begin(&rxsc_stats->syncp); if (hdr->tci_an & MACSEC_TCI_E) - rxsc_stats->stats.InOctetsDecrypted += skb->len; + rxsc_stats->stats.InOctetsDecrypted += msdu_len; else - rxsc_stats->stats.InOctetsValidated += skb->len; + rxsc_stats->stats.InOctetsValidated += msdu_len; u64_stats_update_end(&rxsc_stats->syncp); } @@ -825,6 +775,8 @@ static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u u64_stats_update_begin(&rxsc_stats->syncp); rxsc_stats->stats.InPktsNotValid++; u64_stats_update_end(&rxsc_stats->syncp); + this_cpu_inc(rx_sa->stats->InPktsNotValid); + DEV_STATS_INC(secy->netdev, rx_errors); return false; } @@ -848,8 +800,15 @@ static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u } u64_stats_update_end(&rxsc_stats->syncp); - if (pn >= rx_sa->next_pn) - rx_sa->next_pn = pn + 1; + // Instead of "pn >=" - to support pn overflow in xpn + if (pn + 1 > rx_sa->next_pn_halves.lower) { + rx_sa->next_pn_halves.lower = pn + 1; + } else if (secy->xpn && + !pn_same_half(pn, rx_sa->next_pn_halves.lower)) { + rx_sa->next_pn_halves.upper++; + rx_sa->next_pn_halves.lower = pn + 1; + } + spin_unlock(&rx_sa->lock); } @@ -869,6 +828,7 @@ static void macsec_reset_skb(struct sk_buff *skb, struct net_device *dev) static void macsec_finalize_skb(struct sk_buff *skb, u8 icv_len, u8 hdr_len) { + skb->ip_summed = CHECKSUM_NONE; memmove(skb->data + hdr_len, skb->data, 2 * ETH_ALEN); skb_pull(skb, hdr_len); pskb_trim_unique(skb, skb->len - icv_len); @@ -876,17 +836,12 @@ static void macsec_finalize_skb(struct sk_buff *skb, u8 icv_len, u8 hdr_len) static void count_rx(struct net_device *dev, int len) { - struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats); - - u64_stats_update_begin(&stats->syncp); - stats->rx_packets++; - stats->rx_bytes += len; - u64_stats_update_end(&stats->syncp); + dev_sw_netstats_rx_add(dev, len); } -static void macsec_decrypt_done(struct crypto_async_request *base, int err) +static void macsec_decrypt_done(void *data, int err) { - struct sk_buff *skb = base->data; + struct sk_buff *skb = data; struct net_device *dev = skb->dev; struct macsec_dev *macsec = 
macsec_priv(dev); struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa; @@ -909,9 +864,9 @@ static void macsec_decrypt_done(struct crypto_async_request *base, int err) macsec_finalize_skb(skb, macsec->secy.icv_len, macsec_extra_len(macsec_skb_cb(skb)->has_sci)); + len = skb->len; macsec_reset_skb(skb, macsec->secy.netdev); - len = skb->len; if (gro_cells_receive(&macsec->gro_cells, skb) == NET_RX_SUCCESS) count_rx(dev, len); @@ -935,6 +890,7 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb, unsigned char *iv; struct aead_request *req; struct macsec_eth_header *hdr; + u32 hdr_pn; u16 icv_len = secy->icv_len; macsec_skb_cb(skb)->valid = false; @@ -954,7 +910,21 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb, } hdr = (struct macsec_eth_header *)skb->data; - macsec_fill_iv(iv, sci, ntohl(hdr->packet_number)); + hdr_pn = ntohl(hdr->packet_number); + + if (secy->xpn) { + pn_t recovered_pn = rx_sa->next_pn_halves; + + recovered_pn.lower = hdr_pn; + if (hdr_pn < rx_sa->next_pn_halves.lower && + !pn_same_half(hdr_pn, rx_sa->next_pn_halves.lower)) + recovered_pn.upper++; + + macsec_fill_iv_xpn(iv, rx_sa->ssci, recovered_pn.full64, + rx_sa->key.salt); + } else { + macsec_fill_iv(iv, sci, hdr_pn); + } sg_init_table(sg, ret); ret = skb_to_sgvec(skb, sg, 0, skb->len); @@ -1033,26 +1003,103 @@ static struct macsec_rx_sc *find_rx_sc_rtnl(struct macsec_secy *secy, sci_t sci) return NULL; } -static void handle_not_macsec(struct sk_buff *skb) +static enum rx_handler_result handle_not_macsec(struct sk_buff *skb) { + /* Deliver to the uncontrolled port by default */ + enum rx_handler_result ret = RX_HANDLER_PASS; + struct ethhdr *hdr = eth_hdr(skb); + struct metadata_dst *md_dst; struct macsec_rxh_data *rxd; struct macsec_dev *macsec; + bool is_macsec_md_dst; rcu_read_lock(); rxd = macsec_data_rcu(skb->dev); + md_dst = skb_metadata_dst(skb); + is_macsec_md_dst = md_dst && md_dst->type == METADATA_MACSEC; - /* 10.6 If the management control validateFrames is not - * Strict, frames without a SecTAG are received, counted, and - * delivered to the Controlled Port - */ list_for_each_entry_rcu(macsec, &rxd->secys, secys) { struct sk_buff *nskb; struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats); + struct net_device *ndev = macsec->secy.netdev; + + /* If h/w offloading is enabled, HW decodes frames and strips + * the SecTAG, so we have to deduce which port to deliver to. + */ + if (macsec_is_offloaded(macsec) && netif_running(ndev)) { + const struct macsec_ops *ops; + + ops = macsec_get_ops(macsec, NULL); + + if (ops->rx_uses_md_dst && !is_macsec_md_dst) + continue; + + if (is_macsec_md_dst) { + struct macsec_rx_sc *rx_sc; + + /* All drivers that implement MACsec offload + * support using skb metadata destinations must + * indicate that they do so. + */ + DEBUG_NET_WARN_ON_ONCE(!ops->rx_uses_md_dst); + rx_sc = find_rx_sc(&macsec->secy, + md_dst->u.macsec_info.sci); + if (!rx_sc) + continue; + /* device indicated macsec offload occurred */ + skb->dev = ndev; + skb->pkt_type = PACKET_HOST; + eth_skb_pkt_type(skb, ndev); + ret = RX_HANDLER_ANOTHER; + goto out; + } + + /* This datapath is insecure because it is unable to + * enforce isolation of broadcast/multicast traffic and + * unicast traffic with promiscuous mode on the macsec + * netdev. Since the core stack has no mechanism to + * check that the hardware did indeed receive MACsec + * traffic, it is possible that the response handling + * done by the MACsec port was to a plaintext packet. 
+ * This violates the MACsec protocol standard. + */ + if (ether_addr_equal_64bits(hdr->h_dest, + ndev->dev_addr)) { + /* exact match, divert skb to this port */ + skb->dev = ndev; + skb->pkt_type = PACKET_HOST; + ret = RX_HANDLER_ANOTHER; + goto out; + } else if (is_multicast_ether_addr_64bits( + hdr->h_dest)) { + /* multicast frame, deliver on this port too */ + nskb = skb_clone(skb, GFP_ATOMIC); + if (!nskb) + break; + + nskb->dev = ndev; + eth_skb_pkt_type(nskb, ndev); + + __netif_rx(nskb); + } else if (ndev->flags & IFF_PROMISC) { + skb->dev = ndev; + skb->pkt_type = PACKET_HOST; + ret = RX_HANDLER_ANOTHER; + goto out; + } + + continue; + } + /* 10.6 If the management control validateFrames is not + * Strict, frames without a SecTAG are received, counted, and + * delivered to the Controlled Port + */ if (macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) { u64_stats_update_begin(&secy_stats->syncp); secy_stats->stats.InPktsNoTag++; u64_stats_update_end(&secy_stats->syncp); + DEV_STATS_INC(macsec->secy.netdev, rx_dropped); continue; } @@ -1061,16 +1108,18 @@ static void handle_not_macsec(struct sk_buff *skb) if (!nskb) break; - nskb->dev = macsec->secy.netdev; + nskb->dev = ndev; - if (netif_rx(nskb) == NET_RX_SUCCESS) { + if (__netif_rx(nskb) == NET_RX_SUCCESS) { u64_stats_update_begin(&secy_stats->syncp); secy_stats->stats.InPktsUntagged++; u64_stats_update_end(&secy_stats->syncp); } } +out: rcu_read_unlock(); + return ret; } static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb) @@ -1083,8 +1132,9 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb) struct macsec_rx_sa *rx_sa; struct macsec_rxh_data *rxd; struct macsec_dev *macsec; - sci_t sci; - u32 pn; + unsigned int len; + sci_t sci = 0; + u32 hdr_pn; bool cbit; struct pcpu_rx_sc_stats *rxsc_stats; struct pcpu_secy_stats *secy_stats; @@ -1095,18 +1145,13 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb) goto drop_direct; hdr = macsec_ethhdr(skb); - if (hdr->eth.h_proto != htons(ETH_P_MACSEC)) { - handle_not_macsec(skb); - - /* and deliver to the uncontrolled port */ - return RX_HANDLER_PASS; - } + if (hdr->eth.h_proto != htons(ETH_P_MACSEC)) + return handle_not_macsec(skb); skb = skb_unshare(skb, GFP_ATOMIC); - if (!skb) { - *pskb = NULL; + *pskb = skb; + if (!skb) return RX_HANDLER_CONSUMED; - } pulled_sci = pskb_may_pull(skb, macsec_extra_len(true)); if (!pulled_sci) { @@ -1135,11 +1180,14 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb) macsec_skb_cb(skb)->has_sci = !!(hdr->tci_an & MACSEC_TCI_SC); macsec_skb_cb(skb)->assoc_num = hdr->tci_an & MACSEC_AN_MASK; - sci = macsec_frame_sci(hdr, macsec_skb_cb(skb)->has_sci); rcu_read_lock(); rxd = macsec_data_rcu(skb->dev); + sci = macsec_frame_sci(hdr, macsec_skb_cb(skb)->has_sci, rxd); + if (!sci) + goto drop_nosc; + list_for_each_entry_rcu(macsec, &rxd->secys, secys) { struct macsec_rx_sc *sc = find_rx_sc(&macsec->secy, sci); @@ -1160,10 +1208,11 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb) secy_stats = this_cpu_ptr(macsec->stats); rxsc_stats = this_cpu_ptr(rx_sc->stats); - if (!macsec_validate_skb(skb, secy->icv_len)) { + if (!macsec_validate_skb(skb, secy->icv_len, secy->xpn)) { u64_stats_update_begin(&secy_stats->syncp); secy_stats->stats.InPktsBadTag++; u64_stats_update_end(&secy_stats->syncp); + DEV_STATS_INC(secy->netdev, rx_errors); goto drop_nosa; } @@ -1179,6 +1228,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb) 
u64_stats_update_begin(&rxsc_stats->syncp); rxsc_stats->stats.InPktsNotUsingSA++; u64_stats_update_end(&rxsc_stats->syncp); + DEV_STATS_INC(secy->netdev, rx_errors); goto drop_nosa; } @@ -1192,19 +1242,23 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb) } /* First, PN check to avoid decrypting obviously wrong packets */ - pn = ntohl(hdr->packet_number); + hdr_pn = ntohl(hdr->packet_number); if (secy->replay_protect) { bool late; spin_lock(&rx_sa->lock); - late = rx_sa->next_pn >= secy->replay_window && - pn < (rx_sa->next_pn - secy->replay_window); + late = rx_sa->next_pn_halves.lower >= secy->replay_window && + hdr_pn < (rx_sa->next_pn_halves.lower - secy->replay_window); + + if (secy->xpn) + late = late && pn_same_half(rx_sa->next_pn_halves.lower, hdr_pn); spin_unlock(&rx_sa->lock); if (late) { u64_stats_update_begin(&rxsc_stats->syncp); rxsc_stats->stats.InPktsLate++; u64_stats_update_end(&rxsc_stats->syncp); + DEV_STATS_INC(macsec->secy.netdev, rx_dropped); goto drop; } } @@ -1227,23 +1281,25 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb) return RX_HANDLER_CONSUMED; } - if (!macsec_post_decrypt(skb, secy, pn)) + if (!macsec_post_decrypt(skb, secy, hdr_pn)) goto drop; deliver: macsec_finalize_skb(skb, secy->icv_len, macsec_extra_len(macsec_skb_cb(skb)->has_sci)); + len = skb->len; macsec_reset_skb(skb, secy->netdev); if (rx_sa) macsec_rxsa_put(rx_sa); macsec_rxsc_put(rx_sc); + skb_orphan(skb); ret = gro_cells_receive(&macsec->gro_cells, skb); if (ret == NET_RX_SUCCESS) - count_rx(dev, skb->len); + count_rx(dev, len); else - macsec->secy.netdev->stats.rx_dropped++; + DEV_STATS_INC(macsec->secy.netdev, rx_dropped); rcu_read_unlock(); @@ -1254,6 +1310,7 @@ drop: macsec_rxsa_put(rx_sa); drop_nosa: macsec_rxsc_put(rx_sc); +drop_nosc: rcu_read_unlock(); drop_direct: kfree_skb(skb); @@ -1264,7 +1321,7 @@ nosci: /* 10.6.1 if the SC is not found */ cbit = !!(hdr->tci_an & MACSEC_TCI_C); if (!cbit) - macsec_finalize_skb(skb, DEFAULT_ICV_LEN, + macsec_finalize_skb(skb, MACSEC_DEFAULT_ICV_LEN, macsec_extra_len(macsec_skb_cb(skb)->has_sci)); list_for_each_entry_rcu(macsec, &rxd->secys, secys) { @@ -1280,6 +1337,7 @@ nosci: u64_stats_update_begin(&secy_stats->syncp); secy_stats->stats.InPktsNoSCI++; u64_stats_update_end(&secy_stats->syncp); + DEV_STATS_INC(macsec->secy.netdev, rx_errors); continue; } @@ -1292,13 +1350,13 @@ nosci: macsec_reset_skb(nskb, macsec->secy.netdev); - ret = netif_rx(nskb); + ret = __netif_rx(nskb); if (ret == NET_RX_SUCCESS) { u64_stats_update_begin(&secy_stats->syncp); secy_stats->stats.InPktsUnknownSCI++; u64_stats_update_end(&secy_stats->syncp); } else { - macsec->secy.netdev->stats.rx_dropped++; + DEV_STATS_INC(macsec->secy.netdev, rx_dropped); } } @@ -1344,6 +1402,7 @@ static int init_rx_sa(struct macsec_rx_sa *rx_sa, char *sak, int key_len, return PTR_ERR(rx_sa->key.tfm); } + rx_sa->ssci = MACSEC_UNDEF_SSCI; rx_sa->active = false; rx_sa->next_pn = 1; refcount_set(&rx_sa->refcnt, 1); @@ -1392,7 +1451,8 @@ static struct macsec_rx_sc *del_rx_sc(struct macsec_secy *secy, sci_t sci) return NULL; } -static struct macsec_rx_sc *create_rx_sc(struct net_device *dev, sci_t sci) +static struct macsec_rx_sc *create_rx_sc(struct net_device *dev, sci_t sci, + bool active) { struct macsec_rx_sc *rx_sc; struct macsec_dev *macsec; @@ -1416,7 +1476,7 @@ static struct macsec_rx_sc *create_rx_sc(struct net_device *dev, sci_t sci) } rx_sc->sci = sci; - rx_sc->active = true; + rx_sc->active = active; refcount_set(&rx_sc->refcnt, 1); secy = 
&macsec_priv(dev)->secy; @@ -1442,6 +1502,7 @@ static int init_tx_sa(struct macsec_tx_sa *tx_sa, char *sak, int key_len, return PTR_ERR(tx_sa->key.tfm); } + tx_sa->ssci = MACSEC_UNDEF_SSCI; tx_sa->active = false; refcount_set(&tx_sa->refcnt, 1); spin_lock_init(&tx_sa->lock); @@ -1474,6 +1535,11 @@ static struct net_device *get_dev_from_nl(struct net *net, return dev; } +static enum macsec_offload nla_get_offload(const struct nlattr *nla) +{ + return (__force enum macsec_offload)nla_get_u8(nla); +} + static sci_t nla_get_sci(const struct nlattr *nla) { return (__force sci_t)nla_get_u64(nla); @@ -1485,6 +1551,16 @@ static int nla_put_sci(struct sk_buff *skb, int attrtype, sci_t value, return nla_put_u64_64bit(skb, attrtype, (__force u64)value, padattr); } +static ssci_t nla_get_ssci(const struct nlattr *nla) +{ + return (__force ssci_t)nla_get_u32(nla); +} + +static int nla_put_ssci(struct sk_buff *skb, int attrtype, ssci_t value) +{ + return nla_put_u32(skb, attrtype, (__force u64)value); +} + static struct macsec_tx_sa *get_txsa_from_nl(struct net *net, struct nlattr **attrs, struct nlattr **tb_sa, @@ -1507,9 +1583,6 @@ static struct macsec_tx_sa *get_txsa_from_nl(struct net *net, if (IS_ERR(dev)) return ERR_CAST(dev); - if (*assoc_num >= MACSEC_NUM_AN) - return ERR_PTR(-EINVAL); - secy = &macsec_priv(dev)->secy; tx_sc = &secy->tx_sc; @@ -1570,8 +1643,6 @@ static struct macsec_rx_sa *get_rxsa_from_nl(struct net *net, return ERR_PTR(-EINVAL); *assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]); - if (*assoc_num >= MACSEC_NUM_AN) - return ERR_PTR(-EINVAL); rx_sc = get_rxsc_from_nl(net, attrs, tb_rxsc, devp, secyp); if (IS_ERR(rx_sc)) @@ -1589,31 +1660,54 @@ static const struct nla_policy macsec_genl_policy[NUM_MACSEC_ATTR] = { [MACSEC_ATTR_IFINDEX] = { .type = NLA_U32 }, [MACSEC_ATTR_RXSC_CONFIG] = { .type = NLA_NESTED }, [MACSEC_ATTR_SA_CONFIG] = { .type = NLA_NESTED }, + [MACSEC_ATTR_OFFLOAD] = { .type = NLA_NESTED }, }; static const struct nla_policy macsec_genl_rxsc_policy[NUM_MACSEC_RXSC_ATTR] = { [MACSEC_RXSC_ATTR_SCI] = { .type = NLA_U64 }, - [MACSEC_RXSC_ATTR_ACTIVE] = { .type = NLA_U8 }, + [MACSEC_RXSC_ATTR_ACTIVE] = NLA_POLICY_MAX(NLA_U8, 1), }; static const struct nla_policy macsec_genl_sa_policy[NUM_MACSEC_SA_ATTR] = { - [MACSEC_SA_ATTR_AN] = { .type = NLA_U8 }, - [MACSEC_SA_ATTR_ACTIVE] = { .type = NLA_U8 }, - [MACSEC_SA_ATTR_PN] = { .type = NLA_U32 }, - [MACSEC_SA_ATTR_KEYID] = { .type = NLA_BINARY, - .len = MACSEC_KEYID_LEN, }, - [MACSEC_SA_ATTR_KEY] = { .type = NLA_BINARY, - .len = MACSEC_MAX_KEY_LEN, }, + [MACSEC_SA_ATTR_AN] = NLA_POLICY_MAX(NLA_U8, MACSEC_NUM_AN - 1), + [MACSEC_SA_ATTR_ACTIVE] = NLA_POLICY_MAX(NLA_U8, 1), + [MACSEC_SA_ATTR_PN] = NLA_POLICY_MIN(NLA_UINT, 1), + [MACSEC_SA_ATTR_KEYID] = NLA_POLICY_EXACT_LEN(MACSEC_KEYID_LEN), + [MACSEC_SA_ATTR_KEY] = NLA_POLICY_MAX_LEN(MACSEC_MAX_KEY_LEN), + [MACSEC_SA_ATTR_SSCI] = { .type = NLA_U32 }, + [MACSEC_SA_ATTR_SALT] = NLA_POLICY_EXACT_LEN(MACSEC_SALT_LEN), +}; + +static const struct nla_policy macsec_genl_offload_policy[NUM_MACSEC_OFFLOAD_ATTR] = { + [MACSEC_OFFLOAD_ATTR_TYPE] = NLA_POLICY_MAX(NLA_U8, MACSEC_OFFLOAD_MAX), }; +/* Offloads an operation to a device driver */ +static int macsec_offload(int (* const func)(struct macsec_context *), + struct macsec_context *ctx) +{ + int ret; + + if (unlikely(!func)) + return 0; + + if (ctx->offload == MACSEC_OFFLOAD_PHY) + mutex_lock(&ctx->phydev->lock); + + ret = (*func)(ctx); + + if (ctx->offload == MACSEC_OFFLOAD_PHY) + mutex_unlock(&ctx->phydev->lock); + + return ret; +} 
+ static int parse_sa_config(struct nlattr **attrs, struct nlattr **tb_sa) { if (!attrs[MACSEC_ATTR_SA_CONFIG]) return -EINVAL; - if (nla_parse_nested(tb_sa, MACSEC_SA_ATTR_MAX, - attrs[MACSEC_ATTR_SA_CONFIG], - macsec_genl_sa_policy, NULL)) + if (nla_parse_nested_deprecated(tb_sa, MACSEC_SA_ATTR_MAX, attrs[MACSEC_ATTR_SA_CONFIG], macsec_genl_sa_policy, NULL)) return -EINVAL; return 0; @@ -1624,9 +1718,7 @@ static int parse_rxsc_config(struct nlattr **attrs, struct nlattr **tb_rxsc) if (!attrs[MACSEC_ATTR_RXSC_CONFIG]) return -EINVAL; - if (nla_parse_nested(tb_rxsc, MACSEC_RXSC_ATTR_MAX, - attrs[MACSEC_ATTR_RXSC_CONFIG], - macsec_genl_rxsc_policy, NULL)) + if (nla_parse_nested_deprecated(tb_rxsc, MACSEC_RXSC_ATTR_MAX, attrs[MACSEC_ATTR_RXSC_CONFIG], macsec_genl_rxsc_policy, NULL)) return -EINVAL; return 0; @@ -1639,20 +1731,6 @@ static bool validate_add_rxsa(struct nlattr **attrs) !attrs[MACSEC_SA_ATTR_KEYID]) return false; - if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN) - return false; - - if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0) - return false; - - if (attrs[MACSEC_SA_ATTR_ACTIVE]) { - if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1) - return false; - } - - if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN) - return false; - return true; } @@ -1664,6 +1742,7 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info) struct macsec_rx_sc *rx_sc; struct macsec_rx_sa *rx_sa; unsigned char assoc_num; + int pn_len; struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1]; struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1]; int err; @@ -1696,6 +1775,22 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info) return -EINVAL; } + pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN; + if (tb_sa[MACSEC_SA_ATTR_PN] && + nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) { + pr_notice("macsec: nl: add_rxsa: bad pn length: %d != %d\n", + nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len); + rtnl_unlock(); + return -EINVAL; + } + + if (secy->xpn) { + if (!tb_sa[MACSEC_SA_ATTR_SSCI] || !tb_sa[MACSEC_SA_ATTR_SALT]) { + rtnl_unlock(); + return -EINVAL; + } + } + rx_sa = rtnl_dereference(rx_sc->sa[assoc_num]); if (rx_sa) { rtnl_unlock(); @@ -1718,33 +1813,55 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info) if (tb_sa[MACSEC_SA_ATTR_PN]) { spin_lock_bh(&rx_sa->lock); - rx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]); + rx_sa->next_pn = nla_get_uint(tb_sa[MACSEC_SA_ATTR_PN]); spin_unlock_bh(&rx_sa->lock); } if (tb_sa[MACSEC_SA_ATTR_ACTIVE]) rx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]); - nla_memcpy(rx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN); rx_sa->sc = rx_sc; - rcu_assign_pointer(rx_sc->sa[assoc_num], rx_sa); - rtnl_unlock(); + if (secy->xpn) { + rx_sa->ssci = nla_get_ssci(tb_sa[MACSEC_SA_ATTR_SSCI]); + nla_memcpy(rx_sa->key.salt.bytes, tb_sa[MACSEC_SA_ATTR_SALT], + MACSEC_SALT_LEN); + } - return 0; -} + /* If h/w offloading is available, propagate to the device */ + if (macsec_is_offloaded(netdev_priv(dev))) { + const struct macsec_ops *ops; + struct macsec_context ctx; -static bool validate_add_rxsc(struct nlattr **attrs) -{ - if (!attrs[MACSEC_RXSC_ATTR_SCI]) - return false; + ops = macsec_get_ops(netdev_priv(dev), &ctx); + if (!ops) { + err = -EOPNOTSUPP; + goto cleanup; + } - if (attrs[MACSEC_RXSC_ATTR_ACTIVE]) { - if (nla_get_u8(attrs[MACSEC_RXSC_ATTR_ACTIVE]) > 1) - return false; + ctx.sa.assoc_num = assoc_num; + ctx.sa.rx_sa = rx_sa; + ctx.secy = secy; + 
memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]), + secy->key_len); + + err = macsec_offload(ops->mdo_add_rxsa, &ctx); + memzero_explicit(ctx.sa.key, secy->key_len); + if (err) + goto cleanup; } - return true; + nla_memcpy(rx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN); + rcu_assign_pointer(rx_sc->sa[assoc_num], rx_sa); + + rtnl_unlock(); + + return 0; + +cleanup: + macsec_rxsa_put(rx_sa); + rtnl_unlock(); + return err; } static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info) @@ -1754,6 +1871,9 @@ static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info) struct nlattr **attrs = info->attrs; struct macsec_rx_sc *rx_sc; struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1]; + struct macsec_secy *secy; + bool active = true; + int ret; if (!attrs[MACSEC_ATTR_IFINDEX]) return -EINVAL; @@ -1761,7 +1881,7 @@ static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info) if (parse_rxsc_config(attrs, tb_rxsc)) return -EINVAL; - if (!validate_add_rxsc(tb_rxsc)) + if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI]) return -EINVAL; rtnl_lock(); @@ -1771,20 +1891,45 @@ static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info) return PTR_ERR(dev); } + secy = &macsec_priv(dev)->secy; sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]); - rx_sc = create_rx_sc(dev, sci); + if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]) + active = nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]); + + rx_sc = create_rx_sc(dev, sci, active); if (IS_ERR(rx_sc)) { rtnl_unlock(); return PTR_ERR(rx_sc); } - if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]) - rx_sc->active = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]); + if (macsec_is_offloaded(netdev_priv(dev))) { + const struct macsec_ops *ops; + struct macsec_context ctx; + + ops = macsec_get_ops(netdev_priv(dev), &ctx); + if (!ops) { + ret = -EOPNOTSUPP; + goto cleanup; + } + + ctx.rx_sc = rx_sc; + ctx.secy = secy; + + ret = macsec_offload(ops->mdo_add_rxsc, &ctx); + if (ret) + goto cleanup; + } rtnl_unlock(); return 0; + +cleanup: + del_rx_sc(secy, sci); + free_rx_sc(rx_sc); + rtnl_unlock(); + return ret; } static bool validate_add_txsa(struct nlattr **attrs) @@ -1795,20 +1940,6 @@ static bool validate_add_txsa(struct nlattr **attrs) !attrs[MACSEC_SA_ATTR_KEYID]) return false; - if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN) - return false; - - if (nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0) - return false; - - if (attrs[MACSEC_SA_ATTR_ACTIVE]) { - if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1) - return false; - } - - if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN) - return false; - return true; } @@ -1820,7 +1951,9 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info) struct macsec_tx_sc *tx_sc; struct macsec_tx_sa *tx_sa; unsigned char assoc_num; + int pn_len; struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1]; + bool was_operational; int err; if (!attrs[MACSEC_ATTR_IFINDEX]) @@ -1851,6 +1984,21 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info) return -EINVAL; } + pn_len = secy->xpn ? 
MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN; + if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) { + pr_notice("macsec: nl: add_txsa: bad pn length: %d != %d\n", + nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len); + rtnl_unlock(); + return -EINVAL; + } + + if (secy->xpn) { + if (!tb_sa[MACSEC_SA_ATTR_SSCI] || !tb_sa[MACSEC_SA_ATTR_SALT]) { + rtnl_unlock(); + return -EINVAL; + } + } + tx_sa = rtnl_dereference(tx_sc->sa[assoc_num]); if (tx_sa) { rtnl_unlock(); @@ -1871,23 +2019,58 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info) return err; } - nla_memcpy(tx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN); - spin_lock_bh(&tx_sa->lock); - tx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]); + tx_sa->next_pn = nla_get_uint(tb_sa[MACSEC_SA_ATTR_PN]); spin_unlock_bh(&tx_sa->lock); if (tb_sa[MACSEC_SA_ATTR_ACTIVE]) tx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]); + was_operational = secy->operational; if (assoc_num == tx_sc->encoding_sa && tx_sa->active) secy->operational = true; + if (secy->xpn) { + tx_sa->ssci = nla_get_ssci(tb_sa[MACSEC_SA_ATTR_SSCI]); + nla_memcpy(tx_sa->key.salt.bytes, tb_sa[MACSEC_SA_ATTR_SALT], + MACSEC_SALT_LEN); + } + + /* If h/w offloading is available, propagate to the device */ + if (macsec_is_offloaded(netdev_priv(dev))) { + const struct macsec_ops *ops; + struct macsec_context ctx; + + ops = macsec_get_ops(netdev_priv(dev), &ctx); + if (!ops) { + err = -EOPNOTSUPP; + goto cleanup; + } + + ctx.sa.assoc_num = assoc_num; + ctx.sa.tx_sa = tx_sa; + ctx.secy = secy; + memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]), + secy->key_len); + + err = macsec_offload(ops->mdo_add_txsa, &ctx); + memzero_explicit(ctx.sa.key, secy->key_len); + if (err) + goto cleanup; + } + + nla_memcpy(tx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN); rcu_assign_pointer(tx_sc->sa[assoc_num], tx_sa); rtnl_unlock(); return 0; + +cleanup: + secy->operational = was_operational; + macsec_txsa_put(tx_sa); + rtnl_unlock(); + return err; } static int macsec_del_rxsa(struct sk_buff *skb, struct genl_info *info) @@ -1900,6 +2083,7 @@ static int macsec_del_rxsa(struct sk_buff *skb, struct genl_info *info) u8 assoc_num; struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1]; struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1]; + int ret; if (!attrs[MACSEC_ATTR_IFINDEX]) return -EINVAL; @@ -1923,12 +2107,36 @@ static int macsec_del_rxsa(struct sk_buff *skb, struct genl_info *info) return -EBUSY; } + /* If h/w offloading is available, propagate to the device */ + if (macsec_is_offloaded(netdev_priv(dev))) { + const struct macsec_ops *ops; + struct macsec_context ctx; + + ops = macsec_get_ops(netdev_priv(dev), &ctx); + if (!ops) { + ret = -EOPNOTSUPP; + goto cleanup; + } + + ctx.sa.assoc_num = assoc_num; + ctx.sa.rx_sa = rx_sa; + ctx.secy = secy; + + ret = macsec_offload(ops->mdo_del_rxsa, &ctx); + if (ret) + goto cleanup; + } + RCU_INIT_POINTER(rx_sc->sa[assoc_num], NULL); clear_rx_sa(rx_sa); rtnl_unlock(); return 0; + +cleanup: + rtnl_unlock(); + return ret; } static int macsec_del_rxsc(struct sk_buff *skb, struct genl_info *info) @@ -1939,6 +2147,7 @@ static int macsec_del_rxsc(struct sk_buff *skb, struct genl_info *info) struct macsec_rx_sc *rx_sc; sci_t sci; struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1]; + int ret; if (!attrs[MACSEC_ATTR_IFINDEX]) return -EINVAL; @@ -1965,10 +2174,32 @@ static int macsec_del_rxsc(struct sk_buff *skb, struct genl_info *info) return -ENODEV; } + /* If h/w offloading is available, propagate to the device */ + if 
(macsec_is_offloaded(netdev_priv(dev))) { + const struct macsec_ops *ops; + struct macsec_context ctx; + + ops = macsec_get_ops(netdev_priv(dev), &ctx); + if (!ops) { + ret = -EOPNOTSUPP; + goto cleanup; + } + + ctx.rx_sc = rx_sc; + ctx.secy = secy; + ret = macsec_offload(ops->mdo_del_rxsc, &ctx); + if (ret) + goto cleanup; + } + free_rx_sc(rx_sc); rtnl_unlock(); return 0; + +cleanup: + rtnl_unlock(); + return ret; } static int macsec_del_txsa(struct sk_buff *skb, struct genl_info *info) @@ -1980,6 +2211,7 @@ static int macsec_del_txsa(struct sk_buff *skb, struct genl_info *info) struct macsec_tx_sa *tx_sa; u8 assoc_num; struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1]; + int ret; if (!attrs[MACSEC_ATTR_IFINDEX]) return -EINVAL; @@ -2000,32 +2232,47 @@ static int macsec_del_txsa(struct sk_buff *skb, struct genl_info *info) return -EBUSY; } + /* If h/w offloading is available, propagate to the device */ + if (macsec_is_offloaded(netdev_priv(dev))) { + const struct macsec_ops *ops; + struct macsec_context ctx; + + ops = macsec_get_ops(netdev_priv(dev), &ctx); + if (!ops) { + ret = -EOPNOTSUPP; + goto cleanup; + } + + ctx.sa.assoc_num = assoc_num; + ctx.sa.tx_sa = tx_sa; + ctx.secy = secy; + + ret = macsec_offload(ops->mdo_del_txsa, &ctx); + if (ret) + goto cleanup; + } + RCU_INIT_POINTER(tx_sc->sa[assoc_num], NULL); clear_tx_sa(tx_sa); rtnl_unlock(); return 0; + +cleanup: + rtnl_unlock(); + return ret; } static bool validate_upd_sa(struct nlattr **attrs) { if (!attrs[MACSEC_SA_ATTR_AN] || attrs[MACSEC_SA_ATTR_KEY] || - attrs[MACSEC_SA_ATTR_KEYID]) - return false; - - if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN) - return false; - - if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0) + attrs[MACSEC_SA_ATTR_KEYID] || + attrs[MACSEC_SA_ATTR_SSCI] || + attrs[MACSEC_SA_ATTR_SALT]) return false; - if (attrs[MACSEC_SA_ATTR_ACTIVE]) { - if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1) - return false; - } - return true; } @@ -2038,6 +2285,11 @@ static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info) struct macsec_tx_sa *tx_sa; u8 assoc_num; struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1]; + bool was_operational, was_active; + pn_t prev_pn; + int ret = 0; + + prev_pn.full64 = 0; if (!attrs[MACSEC_ATTR_IFINDEX]) return -EINVAL; @@ -2057,20 +2309,65 @@ static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info) } if (tb_sa[MACSEC_SA_ATTR_PN]) { + int pn_len; + + pn_len = secy->xpn ? 
MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN; + if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) { + pr_notice("macsec: nl: upd_txsa: bad pn length: %d != %d\n", + nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len); + rtnl_unlock(); + return -EINVAL; + } + spin_lock_bh(&tx_sa->lock); - tx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]); + prev_pn = tx_sa->next_pn_halves; + tx_sa->next_pn = nla_get_uint(tb_sa[MACSEC_SA_ATTR_PN]); spin_unlock_bh(&tx_sa->lock); } + was_active = tx_sa->active; if (tb_sa[MACSEC_SA_ATTR_ACTIVE]) tx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]); + was_operational = secy->operational; if (assoc_num == tx_sc->encoding_sa) secy->operational = tx_sa->active; + /* If h/w offloading is available, propagate to the device */ + if (macsec_is_offloaded(netdev_priv(dev))) { + const struct macsec_ops *ops; + struct macsec_context ctx; + + ops = macsec_get_ops(netdev_priv(dev), &ctx); + if (!ops) { + ret = -EOPNOTSUPP; + goto cleanup; + } + + ctx.sa.assoc_num = assoc_num; + ctx.sa.tx_sa = tx_sa; + ctx.sa.update_pn = !!prev_pn.full64; + ctx.secy = secy; + + ret = macsec_offload(ops->mdo_upd_txsa, &ctx); + if (ret) + goto cleanup; + } + rtnl_unlock(); return 0; + +cleanup: + if (tb_sa[MACSEC_SA_ATTR_PN]) { + spin_lock_bh(&tx_sa->lock); + tx_sa->next_pn_halves = prev_pn; + spin_unlock_bh(&tx_sa->lock); + } + tx_sa->active = was_active; + secy->operational = was_operational; + rtnl_unlock(); + return ret; } static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info) @@ -2083,6 +2380,11 @@ static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info) u8 assoc_num; struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1]; struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1]; + bool was_active; + pn_t prev_pn; + int ret = 0; + + prev_pn.full64 = 0; if (!attrs[MACSEC_ATTR_IFINDEX]) return -EINVAL; @@ -2105,16 +2407,59 @@ static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info) } if (tb_sa[MACSEC_SA_ATTR_PN]) { + int pn_len; + + pn_len = secy->xpn ? 
MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN; + if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) { + pr_notice("macsec: nl: upd_rxsa: bad pn length: %d != %d\n", + nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len); + rtnl_unlock(); + return -EINVAL; + } + spin_lock_bh(&rx_sa->lock); - rx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]); + prev_pn = rx_sa->next_pn_halves; + rx_sa->next_pn = nla_get_uint(tb_sa[MACSEC_SA_ATTR_PN]); spin_unlock_bh(&rx_sa->lock); } + was_active = rx_sa->active; if (tb_sa[MACSEC_SA_ATTR_ACTIVE]) rx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]); + /* If h/w offloading is available, propagate to the device */ + if (macsec_is_offloaded(netdev_priv(dev))) { + const struct macsec_ops *ops; + struct macsec_context ctx; + + ops = macsec_get_ops(netdev_priv(dev), &ctx); + if (!ops) { + ret = -EOPNOTSUPP; + goto cleanup; + } + + ctx.sa.assoc_num = assoc_num; + ctx.sa.rx_sa = rx_sa; + ctx.sa.update_pn = !!prev_pn.full64; + ctx.secy = secy; + + ret = macsec_offload(ops->mdo_upd_rxsa, &ctx); + if (ret) + goto cleanup; + } + rtnl_unlock(); return 0; + +cleanup: + if (tb_sa[MACSEC_SA_ATTR_PN]) { + spin_lock_bh(&rx_sa->lock); + rx_sa->next_pn_halves = prev_pn; + spin_unlock_bh(&rx_sa->lock); + } + rx_sa->active = was_active; + rtnl_unlock(); + return ret; } static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info) @@ -2124,6 +2469,9 @@ static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info) struct macsec_secy *secy; struct macsec_rx_sc *rx_sc; struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1]; + unsigned int prev_n_rx_sc; + bool was_active; + int ret; if (!attrs[MACSEC_ATTR_IFINDEX]) return -EINVAL; @@ -2131,7 +2479,7 @@ static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info) if (parse_rxsc_config(attrs, tb_rxsc)) return -EINVAL; - if (!validate_add_rxsc(tb_rxsc)) + if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI]) return -EINVAL; rtnl_lock(); @@ -2141,6 +2489,8 @@ static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info) return PTR_ERR(rx_sc); } + was_active = rx_sc->active; + prev_n_rx_sc = secy->n_rx_sc; if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]) { bool new = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]); @@ -2150,211 +2500,488 @@ static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info) rx_sc->active = new; } + /* If h/w offloading is available, propagate to the device */ + if (macsec_is_offloaded(netdev_priv(dev))) { + const struct macsec_ops *ops; + struct macsec_context ctx; + + ops = macsec_get_ops(netdev_priv(dev), &ctx); + if (!ops) { + ret = -EOPNOTSUPP; + goto cleanup; + } + + ctx.rx_sc = rx_sc; + ctx.secy = secy; + + ret = macsec_offload(ops->mdo_upd_rxsc, &ctx); + if (ret) + goto cleanup; + } + rtnl_unlock(); return 0; + +cleanup: + secy->n_rx_sc = prev_n_rx_sc; + rx_sc->active = was_active; + rtnl_unlock(); + return ret; } -static int copy_tx_sa_stats(struct sk_buff *skb, - struct macsec_tx_sa_stats __percpu *pstats) +static bool macsec_is_configured(struct macsec_dev *macsec) { - struct macsec_tx_sa_stats sum = {0, }; + struct macsec_secy *secy = &macsec->secy; + struct macsec_tx_sc *tx_sc = &secy->tx_sc; + int i; + + if (secy->rx_sc) + return true; + + for (i = 0; i < MACSEC_NUM_AN; i++) + if (tx_sc->sa[i]) + return true; + + return false; +} + +static bool macsec_needs_tx_tag(struct macsec_dev *macsec, + const struct macsec_ops *ops) +{ + return macsec->offload == MACSEC_OFFLOAD_PHY && + ops->mdo_insert_tx_tag; +} + +static void macsec_set_head_tail_room(struct net_device *dev) +{ + struct macsec_dev 
*macsec = macsec_priv(dev); + struct net_device *real_dev = macsec->real_dev; + int needed_headroom, needed_tailroom; + const struct macsec_ops *ops; + + ops = macsec_get_ops(macsec, NULL); + if (ops) { + needed_headroom = ops->needed_headroom; + needed_tailroom = ops->needed_tailroom; + } else { + needed_headroom = MACSEC_NEEDED_HEADROOM; + needed_tailroom = MACSEC_NEEDED_TAILROOM; + } + + dev->needed_headroom = real_dev->needed_headroom + needed_headroom; + dev->needed_tailroom = real_dev->needed_tailroom + needed_tailroom; +} + +static void macsec_inherit_tso_max(struct net_device *dev) +{ + struct macsec_dev *macsec = macsec_priv(dev); + + /* if macsec is offloaded, we need to follow the lower + * device's capabilities. otherwise, we can ignore them. + */ + if (macsec_is_offloaded(macsec)) + netif_inherit_tso_max(dev, macsec->real_dev); +} + +static int macsec_update_offload(struct net_device *dev, enum macsec_offload offload) +{ + enum macsec_offload prev_offload; + const struct macsec_ops *ops; + struct macsec_context ctx; + struct macsec_dev *macsec; + int ret = 0; + + macsec = macsec_priv(dev); + + /* Check if the offloading mode is supported by the underlying layers */ + if (offload != MACSEC_OFFLOAD_OFF && + !macsec_check_offload(offload, macsec)) + return -EOPNOTSUPP; + + /* Check if the net device is busy. */ + if (netif_running(dev)) + return -EBUSY; + + /* Check if the device already has rules configured: we do not support + * rules migration. + */ + if (macsec_is_configured(macsec)) + return -EBUSY; + + prev_offload = macsec->offload; + + ops = __macsec_get_ops(offload == MACSEC_OFFLOAD_OFF ? prev_offload : offload, + macsec, &ctx); + if (!ops) + return -EOPNOTSUPP; + + macsec->offload = offload; + + ctx.secy = &macsec->secy; + ret = offload == MACSEC_OFFLOAD_OFF ? 
macsec_offload(ops->mdo_del_secy, &ctx) + : macsec_offload(ops->mdo_add_secy, &ctx); + if (ret) { + macsec->offload = prev_offload; + return ret; + } + + macsec_set_head_tail_room(dev); + macsec->insert_tx_tag = macsec_needs_tx_tag(macsec, ops); + + macsec_inherit_tso_max(dev); + + netdev_update_features(dev); + + return ret; +} + +static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info) +{ + struct nlattr *tb_offload[MACSEC_OFFLOAD_ATTR_MAX + 1]; + struct nlattr **attrs = info->attrs; + enum macsec_offload offload; + struct macsec_dev *macsec; + struct net_device *dev; + int ret = 0; + + if (!attrs[MACSEC_ATTR_IFINDEX]) + return -EINVAL; + + if (!attrs[MACSEC_ATTR_OFFLOAD]) + return -EINVAL; + + if (nla_parse_nested_deprecated(tb_offload, MACSEC_OFFLOAD_ATTR_MAX, + attrs[MACSEC_ATTR_OFFLOAD], + macsec_genl_offload_policy, NULL)) + return -EINVAL; + + rtnl_lock(); + + dev = get_dev_from_nl(genl_info_net(info), attrs); + if (IS_ERR(dev)) { + ret = PTR_ERR(dev); + goto out; + } + macsec = macsec_priv(dev); + + if (!tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]) { + ret = -EINVAL; + goto out; + } + + offload = nla_get_u8(tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]); + + if (macsec->offload != offload) + ret = macsec_update_offload(dev, offload); +out: + rtnl_unlock(); + return ret; +} + +static void get_tx_sa_stats(struct net_device *dev, int an, + struct macsec_tx_sa *tx_sa, + struct macsec_tx_sa_stats *sum) +{ + struct macsec_dev *macsec = macsec_priv(dev); int cpu; + /* If h/w offloading is available, propagate to the device */ + if (macsec_is_offloaded(macsec)) { + const struct macsec_ops *ops; + struct macsec_context ctx; + + ops = macsec_get_ops(macsec, &ctx); + if (ops) { + ctx.sa.assoc_num = an; + ctx.sa.tx_sa = tx_sa; + ctx.stats.tx_sa_stats = sum; + ctx.secy = &macsec_priv(dev)->secy; + macsec_offload(ops->mdo_get_tx_sa_stats, &ctx); + } + return; + } + for_each_possible_cpu(cpu) { - const struct macsec_tx_sa_stats *stats = per_cpu_ptr(pstats, cpu); + const struct macsec_tx_sa_stats *stats = + per_cpu_ptr(tx_sa->stats, cpu); - sum.OutPktsProtected += stats->OutPktsProtected; - sum.OutPktsEncrypted += stats->OutPktsEncrypted; + sum->OutPktsProtected += stats->OutPktsProtected; + sum->OutPktsEncrypted += stats->OutPktsEncrypted; } +} - if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_PROTECTED, sum.OutPktsProtected) || - nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_ENCRYPTED, sum.OutPktsEncrypted)) +static int copy_tx_sa_stats(struct sk_buff *skb, struct macsec_tx_sa_stats *sum) +{ + if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_PROTECTED, + sum->OutPktsProtected) || + nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_ENCRYPTED, + sum->OutPktsEncrypted)) return -EMSGSIZE; return 0; } -static int copy_rx_sa_stats(struct sk_buff *skb, - struct macsec_rx_sa_stats __percpu *pstats) +static void get_rx_sa_stats(struct net_device *dev, + struct macsec_rx_sc *rx_sc, int an, + struct macsec_rx_sa *rx_sa, + struct macsec_rx_sa_stats *sum) { - struct macsec_rx_sa_stats sum = {0, }; + struct macsec_dev *macsec = macsec_priv(dev); int cpu; - for_each_possible_cpu(cpu) { - const struct macsec_rx_sa_stats *stats = per_cpu_ptr(pstats, cpu); + /* If h/w offloading is available, propagate to the device */ + if (macsec_is_offloaded(macsec)) { + const struct macsec_ops *ops; + struct macsec_context ctx; + + ops = macsec_get_ops(macsec, &ctx); + if (ops) { + ctx.sa.assoc_num = an; + ctx.sa.rx_sa = rx_sa; + ctx.stats.rx_sa_stats = sum; + ctx.secy = &macsec_priv(dev)->secy; + ctx.rx_sc = rx_sc; + 
macsec_offload(ops->mdo_get_rx_sa_stats, &ctx); + } + return; + } - sum.InPktsOK += stats->InPktsOK; - sum.InPktsInvalid += stats->InPktsInvalid; - sum.InPktsNotValid += stats->InPktsNotValid; - sum.InPktsNotUsingSA += stats->InPktsNotUsingSA; - sum.InPktsUnusedSA += stats->InPktsUnusedSA; + for_each_possible_cpu(cpu) { + const struct macsec_rx_sa_stats *stats = + per_cpu_ptr(rx_sa->stats, cpu); + + sum->InPktsOK += stats->InPktsOK; + sum->InPktsInvalid += stats->InPktsInvalid; + sum->InPktsNotValid += stats->InPktsNotValid; + sum->InPktsNotUsingSA += stats->InPktsNotUsingSA; + sum->InPktsUnusedSA += stats->InPktsUnusedSA; } +} - if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_OK, sum.InPktsOK) || - nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_INVALID, sum.InPktsInvalid) || - nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_VALID, sum.InPktsNotValid) || - nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_USING_SA, sum.InPktsNotUsingSA) || - nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_UNUSED_SA, sum.InPktsUnusedSA)) +static int copy_rx_sa_stats(struct sk_buff *skb, + struct macsec_rx_sa_stats *sum) +{ + if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_OK, sum->InPktsOK) || + nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_INVALID, + sum->InPktsInvalid) || + nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_VALID, + sum->InPktsNotValid) || + nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_USING_SA, + sum->InPktsNotUsingSA) || + nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_UNUSED_SA, + sum->InPktsUnusedSA)) return -EMSGSIZE; return 0; } -static int copy_rx_sc_stats(struct sk_buff *skb, - struct pcpu_rx_sc_stats __percpu *pstats) +static void get_rx_sc_stats(struct net_device *dev, + struct macsec_rx_sc *rx_sc, + struct macsec_rx_sc_stats *sum) { - struct macsec_rx_sc_stats sum = {0, }; + struct macsec_dev *macsec = macsec_priv(dev); int cpu; + /* If h/w offloading is available, propagate to the device */ + if (macsec_is_offloaded(macsec)) { + const struct macsec_ops *ops; + struct macsec_context ctx; + + ops = macsec_get_ops(macsec, &ctx); + if (ops) { + ctx.stats.rx_sc_stats = sum; + ctx.secy = &macsec_priv(dev)->secy; + ctx.rx_sc = rx_sc; + macsec_offload(ops->mdo_get_rx_sc_stats, &ctx); + } + return; + } + for_each_possible_cpu(cpu) { const struct pcpu_rx_sc_stats *stats; struct macsec_rx_sc_stats tmp; unsigned int start; - stats = per_cpu_ptr(pstats, cpu); + stats = per_cpu_ptr(rx_sc->stats, cpu); do { - start = u64_stats_fetch_begin_irq(&stats->syncp); + start = u64_stats_fetch_begin(&stats->syncp); memcpy(&tmp, &stats->stats, sizeof(tmp)); - } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); - - sum.InOctetsValidated += tmp.InOctetsValidated; - sum.InOctetsDecrypted += tmp.InOctetsDecrypted; - sum.InPktsUnchecked += tmp.InPktsUnchecked; - sum.InPktsDelayed += tmp.InPktsDelayed; - sum.InPktsOK += tmp.InPktsOK; - sum.InPktsInvalid += tmp.InPktsInvalid; - sum.InPktsLate += tmp.InPktsLate; - sum.InPktsNotValid += tmp.InPktsNotValid; - sum.InPktsNotUsingSA += tmp.InPktsNotUsingSA; - sum.InPktsUnusedSA += tmp.InPktsUnusedSA; + } while (u64_stats_fetch_retry(&stats->syncp, start)); + + sum->InOctetsValidated += tmp.InOctetsValidated; + sum->InOctetsDecrypted += tmp.InOctetsDecrypted; + sum->InPktsUnchecked += tmp.InPktsUnchecked; + sum->InPktsDelayed += tmp.InPktsDelayed; + sum->InPktsOK += tmp.InPktsOK; + sum->InPktsInvalid += tmp.InPktsInvalid; + sum->InPktsLate += tmp.InPktsLate; + sum->InPktsNotValid += tmp.InPktsNotValid; + sum->InPktsNotUsingSA += tmp.InPktsNotUsingSA; + 
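In the software fallbacks, the 64-bit SC and SecY counters are snapshotted per CPU under u64_stats_fetch_begin()/u64_stats_fetch_retry(), so a concurrent writer on a 32-bit machine can never be observed half-updated. A stripped-down sketch of that reader pattern; the example_pcpu_stats structure and its fields are illustrative, not taken from macsec.c:

#include <linux/percpu.h>
#include <linux/u64_stats_sync.h>

/* Illustrative per-CPU counter block protected by a u64_stats_sync. */
struct example_pcpu_stats {
	u64 packets;
	u64 octets;
	struct u64_stats_sync syncp;
};

static void example_sum_stats(struct example_pcpu_stats __percpu *pstats,
			      u64 *packets, u64 *octets)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct example_pcpu_stats *stats = per_cpu_ptr(pstats, cpu);
		unsigned int start;
		u64 p, o;

		do {
			/* restart the copy if the writer side ran in between */
			start = u64_stats_fetch_begin(&stats->syncp);
			p = stats->packets;
			o = stats->octets;
		} while (u64_stats_fetch_retry(&stats->syncp, start));

		*packets += p;
		*octets += o;
	}
}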
sum->InPktsUnusedSA += tmp.InPktsUnusedSA; } +} +static int copy_rx_sc_stats(struct sk_buff *skb, struct macsec_rx_sc_stats *sum) +{ if (nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_VALIDATED, - sum.InOctetsValidated, + sum->InOctetsValidated, MACSEC_RXSC_STATS_ATTR_PAD) || nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_DECRYPTED, - sum.InOctetsDecrypted, + sum->InOctetsDecrypted, MACSEC_RXSC_STATS_ATTR_PAD) || nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNCHECKED, - sum.InPktsUnchecked, + sum->InPktsUnchecked, MACSEC_RXSC_STATS_ATTR_PAD) || nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_DELAYED, - sum.InPktsDelayed, + sum->InPktsDelayed, MACSEC_RXSC_STATS_ATTR_PAD) || nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_OK, - sum.InPktsOK, + sum->InPktsOK, MACSEC_RXSC_STATS_ATTR_PAD) || nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_INVALID, - sum.InPktsInvalid, + sum->InPktsInvalid, MACSEC_RXSC_STATS_ATTR_PAD) || nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_LATE, - sum.InPktsLate, + sum->InPktsLate, MACSEC_RXSC_STATS_ATTR_PAD) || nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_VALID, - sum.InPktsNotValid, + sum->InPktsNotValid, MACSEC_RXSC_STATS_ATTR_PAD) || nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_USING_SA, - sum.InPktsNotUsingSA, + sum->InPktsNotUsingSA, MACSEC_RXSC_STATS_ATTR_PAD) || nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNUSED_SA, - sum.InPktsUnusedSA, + sum->InPktsUnusedSA, MACSEC_RXSC_STATS_ATTR_PAD)) return -EMSGSIZE; return 0; } -static int copy_tx_sc_stats(struct sk_buff *skb, - struct pcpu_tx_sc_stats __percpu *pstats) +static void get_tx_sc_stats(struct net_device *dev, + struct macsec_tx_sc_stats *sum) { - struct macsec_tx_sc_stats sum = {0, }; + struct macsec_dev *macsec = macsec_priv(dev); int cpu; + /* If h/w offloading is available, propagate to the device */ + if (macsec_is_offloaded(macsec)) { + const struct macsec_ops *ops; + struct macsec_context ctx; + + ops = macsec_get_ops(macsec, &ctx); + if (ops) { + ctx.stats.tx_sc_stats = sum; + ctx.secy = &macsec_priv(dev)->secy; + macsec_offload(ops->mdo_get_tx_sc_stats, &ctx); + } + return; + } + for_each_possible_cpu(cpu) { const struct pcpu_tx_sc_stats *stats; struct macsec_tx_sc_stats tmp; unsigned int start; - stats = per_cpu_ptr(pstats, cpu); + stats = per_cpu_ptr(macsec_priv(dev)->secy.tx_sc.stats, cpu); do { - start = u64_stats_fetch_begin_irq(&stats->syncp); + start = u64_stats_fetch_begin(&stats->syncp); memcpy(&tmp, &stats->stats, sizeof(tmp)); - } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); + } while (u64_stats_fetch_retry(&stats->syncp, start)); - sum.OutPktsProtected += tmp.OutPktsProtected; - sum.OutPktsEncrypted += tmp.OutPktsEncrypted; - sum.OutOctetsProtected += tmp.OutOctetsProtected; - sum.OutOctetsEncrypted += tmp.OutOctetsEncrypted; + sum->OutPktsProtected += tmp.OutPktsProtected; + sum->OutPktsEncrypted += tmp.OutPktsEncrypted; + sum->OutOctetsProtected += tmp.OutOctetsProtected; + sum->OutOctetsEncrypted += tmp.OutOctetsEncrypted; } +} +static int copy_tx_sc_stats(struct sk_buff *skb, struct macsec_tx_sc_stats *sum) +{ if (nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_PROTECTED, - sum.OutPktsProtected, + sum->OutPktsProtected, MACSEC_TXSC_STATS_ATTR_PAD) || nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_ENCRYPTED, - sum.OutPktsEncrypted, + sum->OutPktsEncrypted, MACSEC_TXSC_STATS_ATTR_PAD) || nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_PROTECTED, - 
sum.OutOctetsProtected, + sum->OutOctetsProtected, MACSEC_TXSC_STATS_ATTR_PAD) || nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_ENCRYPTED, - sum.OutOctetsEncrypted, + sum->OutOctetsEncrypted, MACSEC_TXSC_STATS_ATTR_PAD)) return -EMSGSIZE; return 0; } -static int copy_secy_stats(struct sk_buff *skb, - struct pcpu_secy_stats __percpu *pstats) +static void get_secy_stats(struct net_device *dev, struct macsec_dev_stats *sum) { - struct macsec_dev_stats sum = {0, }; + struct macsec_dev *macsec = macsec_priv(dev); int cpu; + /* If h/w offloading is available, propagate to the device */ + if (macsec_is_offloaded(macsec)) { + const struct macsec_ops *ops; + struct macsec_context ctx; + + ops = macsec_get_ops(macsec, &ctx); + if (ops) { + ctx.stats.dev_stats = sum; + ctx.secy = &macsec_priv(dev)->secy; + macsec_offload(ops->mdo_get_dev_stats, &ctx); + } + return; + } + for_each_possible_cpu(cpu) { const struct pcpu_secy_stats *stats; struct macsec_dev_stats tmp; unsigned int start; - stats = per_cpu_ptr(pstats, cpu); + stats = per_cpu_ptr(macsec_priv(dev)->stats, cpu); do { - start = u64_stats_fetch_begin_irq(&stats->syncp); + start = u64_stats_fetch_begin(&stats->syncp); memcpy(&tmp, &stats->stats, sizeof(tmp)); - } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); - - sum.OutPktsUntagged += tmp.OutPktsUntagged; - sum.InPktsUntagged += tmp.InPktsUntagged; - sum.OutPktsTooLong += tmp.OutPktsTooLong; - sum.InPktsNoTag += tmp.InPktsNoTag; - sum.InPktsBadTag += tmp.InPktsBadTag; - sum.InPktsUnknownSCI += tmp.InPktsUnknownSCI; - sum.InPktsNoSCI += tmp.InPktsNoSCI; - sum.InPktsOverrun += tmp.InPktsOverrun; + } while (u64_stats_fetch_retry(&stats->syncp, start)); + + sum->OutPktsUntagged += tmp.OutPktsUntagged; + sum->InPktsUntagged += tmp.InPktsUntagged; + sum->OutPktsTooLong += tmp.OutPktsTooLong; + sum->InPktsNoTag += tmp.InPktsNoTag; + sum->InPktsBadTag += tmp.InPktsBadTag; + sum->InPktsUnknownSCI += tmp.InPktsUnknownSCI; + sum->InPktsNoSCI += tmp.InPktsNoSCI; + sum->InPktsOverrun += tmp.InPktsOverrun; } +} +static int copy_secy_stats(struct sk_buff *skb, struct macsec_dev_stats *sum) +{ if (nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_UNTAGGED, - sum.OutPktsUntagged, + sum->OutPktsUntagged, MACSEC_SECY_STATS_ATTR_PAD) || nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNTAGGED, - sum.InPktsUntagged, + sum->InPktsUntagged, MACSEC_SECY_STATS_ATTR_PAD) || nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_TOO_LONG, - sum.OutPktsTooLong, + sum->OutPktsTooLong, MACSEC_SECY_STATS_ATTR_PAD) || nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_TAG, - sum.InPktsNoTag, + sum->InPktsNoTag, MACSEC_SECY_STATS_ATTR_PAD) || nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_BAD_TAG, - sum.InPktsBadTag, + sum->InPktsBadTag, MACSEC_SECY_STATS_ATTR_PAD) || nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNKNOWN_SCI, - sum.InPktsUnknownSCI, + sum->InPktsUnknownSCI, MACSEC_SECY_STATS_ATTR_PAD) || nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_SCI, - sum.InPktsNoSCI, + sum->InPktsNoSCI, MACSEC_SECY_STATS_ATTR_PAD) || nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_OVERRUN, - sum.InPktsOverrun, + sum->InPktsOverrun, MACSEC_SECY_STATS_ATTR_PAD)) return -EMSGSIZE; @@ -2364,7 +2991,8 @@ static int copy_secy_stats(struct sk_buff *skb, static int nla_put_secy(struct macsec_secy *secy, struct sk_buff *skb) { struct macsec_tx_sc *tx_sc = &secy->tx_sc; - struct nlattr *secy_nest = nla_nest_start(skb, MACSEC_ATTR_SECY); + struct nlattr *secy_nest = 
nla_nest_start_noflag(skb, + MACSEC_ATTR_SECY); u64 csid; if (!secy_nest) @@ -2372,10 +3000,10 @@ static int nla_put_secy(struct macsec_secy *secy, struct sk_buff *skb) switch (secy->key_len) { case MACSEC_GCM_AES_128_SAK_LEN: - csid = MACSEC_DEFAULT_CIPHER_ID; + csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_128 : MACSEC_DEFAULT_CIPHER_ID; break; case MACSEC_GCM_AES_256_SAK_LEN: - csid = MACSEC_CIPHER_ID_GCM_AES_256; + csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_256 : MACSEC_CIPHER_ID_GCM_AES_256; break; default: goto cancel; @@ -2410,15 +3038,22 @@ cancel: return 1; } -static int dump_secy(struct macsec_secy *secy, struct net_device *dev, - struct sk_buff *skb, struct netlink_callback *cb) +static noinline_for_stack int +dump_secy(struct macsec_secy *secy, struct net_device *dev, + struct sk_buff *skb, struct netlink_callback *cb) { - struct macsec_rx_sc *rx_sc; + struct macsec_tx_sc_stats tx_sc_stats = {0, }; + struct macsec_tx_sa_stats tx_sa_stats = {0, }; + struct macsec_rx_sc_stats rx_sc_stats = {0, }; + struct macsec_rx_sa_stats rx_sa_stats = {0, }; + struct macsec_dev *macsec = netdev_priv(dev); + struct macsec_dev_stats dev_stats = {0, }; struct macsec_tx_sc *tx_sc = &secy->tx_sc; struct nlattr *txsa_list, *rxsc_list; - int i, j; - void *hdr; + struct macsec_rx_sc *rx_sc; struct nlattr *attr; + void *hdr; + int i, j; hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, &macsec_fam, NLM_F_MULTI, MACSEC_CMD_GET_TXSC); @@ -2430,71 +3065,94 @@ static int dump_secy(struct macsec_secy *secy, struct net_device *dev, if (nla_put_u32(skb, MACSEC_ATTR_IFINDEX, dev->ifindex)) goto nla_put_failure; + attr = nla_nest_start_noflag(skb, MACSEC_ATTR_OFFLOAD); + if (!attr) + goto nla_put_failure; + if (nla_put_u8(skb, MACSEC_OFFLOAD_ATTR_TYPE, macsec->offload)) + goto nla_put_failure; + nla_nest_end(skb, attr); + if (nla_put_secy(secy, skb)) goto nla_put_failure; - attr = nla_nest_start(skb, MACSEC_ATTR_TXSC_STATS); + attr = nla_nest_start_noflag(skb, MACSEC_ATTR_TXSC_STATS); if (!attr) goto nla_put_failure; - if (copy_tx_sc_stats(skb, tx_sc->stats)) { + + get_tx_sc_stats(dev, &tx_sc_stats); + if (copy_tx_sc_stats(skb, &tx_sc_stats)) { nla_nest_cancel(skb, attr); goto nla_put_failure; } nla_nest_end(skb, attr); - attr = nla_nest_start(skb, MACSEC_ATTR_SECY_STATS); + attr = nla_nest_start_noflag(skb, MACSEC_ATTR_SECY_STATS); if (!attr) goto nla_put_failure; - if (copy_secy_stats(skb, macsec_priv(dev)->stats)) { + get_secy_stats(dev, &dev_stats); + if (copy_secy_stats(skb, &dev_stats)) { nla_nest_cancel(skb, attr); goto nla_put_failure; } nla_nest_end(skb, attr); - txsa_list = nla_nest_start(skb, MACSEC_ATTR_TXSA_LIST); + txsa_list = nla_nest_start_noflag(skb, MACSEC_ATTR_TXSA_LIST); if (!txsa_list) goto nla_put_failure; for (i = 0, j = 1; i < MACSEC_NUM_AN; i++) { struct macsec_tx_sa *tx_sa = rtnl_dereference(tx_sc->sa[i]); struct nlattr *txsa_nest; + u64 pn; + int pn_len; if (!tx_sa) continue; - txsa_nest = nla_nest_start(skb, j++); + txsa_nest = nla_nest_start_noflag(skb, j++); if (!txsa_nest) { nla_nest_cancel(skb, txsa_list); goto nla_put_failure; } - if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) || - nla_put_u32(skb, MACSEC_SA_ATTR_PN, tx_sa->next_pn) || - nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, tx_sa->key.id) || - nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, tx_sa->active)) { + attr = nla_nest_start_noflag(skb, MACSEC_SA_ATTR_STATS); + if (!attr) { nla_nest_cancel(skb, txsa_nest); nla_nest_cancel(skb, txsa_list); goto nla_put_failure; } - - attr = nla_nest_start(skb, 
MACSEC_SA_ATTR_STATS); - if (!attr) { + memset(&tx_sa_stats, 0, sizeof(tx_sa_stats)); + get_tx_sa_stats(dev, i, tx_sa, &tx_sa_stats); + if (copy_tx_sa_stats(skb, &tx_sa_stats)) { + nla_nest_cancel(skb, attr); nla_nest_cancel(skb, txsa_nest); nla_nest_cancel(skb, txsa_list); goto nla_put_failure; } - if (copy_tx_sa_stats(skb, tx_sa->stats)) { - nla_nest_cancel(skb, attr); + nla_nest_end(skb, attr); + + if (secy->xpn) { + pn = tx_sa->next_pn; + pn_len = MACSEC_XPN_PN_LEN; + } else { + pn = tx_sa->next_pn_halves.lower; + pn_len = MACSEC_DEFAULT_PN_LEN; + } + + if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) || + nla_put(skb, MACSEC_SA_ATTR_PN, pn_len, &pn) || + nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, tx_sa->key.id) || + (secy->xpn && nla_put_ssci(skb, MACSEC_SA_ATTR_SSCI, tx_sa->ssci)) || + nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, tx_sa->active)) { nla_nest_cancel(skb, txsa_nest); nla_nest_cancel(skb, txsa_list); goto nla_put_failure; } - nla_nest_end(skb, attr); nla_nest_end(skb, txsa_nest); } nla_nest_end(skb, txsa_list); - rxsc_list = nla_nest_start(skb, MACSEC_ATTR_RXSC_LIST); + rxsc_list = nla_nest_start_noflag(skb, MACSEC_ATTR_RXSC_LIST); if (!rxsc_list) goto nla_put_failure; @@ -2502,7 +3160,7 @@ static int dump_secy(struct macsec_secy *secy, struct net_device *dev, for_each_rxsc_rtnl(secy, rx_sc) { int k; struct nlattr *rxsa_list; - struct nlattr *rxsc_nest = nla_nest_start(skb, j++); + struct nlattr *rxsc_nest = nla_nest_start_noflag(skb, j++); if (!rxsc_nest) { nla_nest_cancel(skb, rxsc_list); @@ -2517,13 +3175,15 @@ static int dump_secy(struct macsec_secy *secy, struct net_device *dev, goto nla_put_failure; } - attr = nla_nest_start(skb, MACSEC_RXSC_ATTR_STATS); + attr = nla_nest_start_noflag(skb, MACSEC_RXSC_ATTR_STATS); if (!attr) { nla_nest_cancel(skb, rxsc_nest); nla_nest_cancel(skb, rxsc_list); goto nla_put_failure; } - if (copy_rx_sc_stats(skb, rx_sc->stats)) { + memset(&rx_sc_stats, 0, sizeof(rx_sc_stats)); + get_rx_sc_stats(dev, rx_sc, &rx_sc_stats); + if (copy_rx_sc_stats(skb, &rx_sc_stats)) { nla_nest_cancel(skb, attr); nla_nest_cancel(skb, rxsc_nest); nla_nest_cancel(skb, rxsc_list); @@ -2531,7 +3191,8 @@ static int dump_secy(struct macsec_secy *secy, struct net_device *dev, } nla_nest_end(skb, attr); - rxsa_list = nla_nest_start(skb, MACSEC_RXSC_ATTR_SA_LIST); + rxsa_list = nla_nest_start_noflag(skb, + MACSEC_RXSC_ATTR_SA_LIST); if (!rxsa_list) { nla_nest_cancel(skb, rxsc_nest); nla_nest_cancel(skb, rxsc_list); @@ -2541,11 +3202,13 @@ static int dump_secy(struct macsec_secy *secy, struct net_device *dev, for (i = 0, k = 1; i < MACSEC_NUM_AN; i++) { struct macsec_rx_sa *rx_sa = rtnl_dereference(rx_sc->sa[i]); struct nlattr *rxsa_nest; + u64 pn; + int pn_len; if (!rx_sa) continue; - rxsa_nest = nla_nest_start(skb, k++); + rxsa_nest = nla_nest_start_noflag(skb, k++); if (!rxsa_nest) { nla_nest_cancel(skb, rxsa_list); nla_nest_cancel(skb, rxsc_nest); @@ -2553,14 +3216,17 @@ static int dump_secy(struct macsec_secy *secy, struct net_device *dev, goto nla_put_failure; } - attr = nla_nest_start(skb, MACSEC_SA_ATTR_STATS); + attr = nla_nest_start_noflag(skb, + MACSEC_SA_ATTR_STATS); if (!attr) { nla_nest_cancel(skb, rxsa_list); nla_nest_cancel(skb, rxsc_nest); nla_nest_cancel(skb, rxsc_list); goto nla_put_failure; } - if (copy_rx_sa_stats(skb, rx_sa->stats)) { + memset(&rx_sa_stats, 0, sizeof(rx_sa_stats)); + get_rx_sa_stats(dev, rx_sc, i, rx_sa, &rx_sa_stats); + if (copy_rx_sa_stats(skb, &rx_sa_stats)) { nla_nest_cancel(skb, attr); nla_nest_cancel(skb, rxsa_list); 
nla_nest_cancel(skb, rxsc_nest); @@ -2569,9 +3235,18 @@ static int dump_secy(struct macsec_secy *secy, struct net_device *dev, } nla_nest_end(skb, attr); + if (secy->xpn) { + pn = rx_sa->next_pn; + pn_len = MACSEC_XPN_PN_LEN; + } else { + pn = rx_sa->next_pn_halves.lower; + pn_len = MACSEC_DEFAULT_PN_LEN; + } + if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) || - nla_put_u32(skb, MACSEC_SA_ATTR_PN, rx_sa->next_pn) || + nla_put(skb, MACSEC_SA_ATTR_PN, pn_len, &pn) || nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, rx_sa->key.id) || + (secy->xpn && nla_put_ssci(skb, MACSEC_SA_ATTR_SSCI, rx_sa->ssci)) || nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, rx_sa->active)) { nla_nest_cancel(skb, rxsa_nest); nla_nest_cancel(skb, rxsc_nest); @@ -2633,64 +3308,70 @@ done: return skb->len; } -static const struct genl_ops macsec_genl_ops[] = { +static const struct genl_small_ops macsec_genl_ops[] = { { .cmd = MACSEC_CMD_GET_TXSC, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .dumpit = macsec_dump_txsc, - .policy = macsec_genl_policy, }, { .cmd = MACSEC_CMD_ADD_RXSC, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = macsec_add_rxsc, - .policy = macsec_genl_policy, .flags = GENL_ADMIN_PERM, }, { .cmd = MACSEC_CMD_DEL_RXSC, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = macsec_del_rxsc, - .policy = macsec_genl_policy, .flags = GENL_ADMIN_PERM, }, { .cmd = MACSEC_CMD_UPD_RXSC, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = macsec_upd_rxsc, - .policy = macsec_genl_policy, .flags = GENL_ADMIN_PERM, }, { .cmd = MACSEC_CMD_ADD_TXSA, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = macsec_add_txsa, - .policy = macsec_genl_policy, .flags = GENL_ADMIN_PERM, }, { .cmd = MACSEC_CMD_DEL_TXSA, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = macsec_del_txsa, - .policy = macsec_genl_policy, .flags = GENL_ADMIN_PERM, }, { .cmd = MACSEC_CMD_UPD_TXSA, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = macsec_upd_txsa, - .policy = macsec_genl_policy, .flags = GENL_ADMIN_PERM, }, { .cmd = MACSEC_CMD_ADD_RXSA, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = macsec_add_rxsa, - .policy = macsec_genl_policy, .flags = GENL_ADMIN_PERM, }, { .cmd = MACSEC_CMD_DEL_RXSA, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = macsec_del_rxsa, - .policy = macsec_genl_policy, .flags = GENL_ADMIN_PERM, }, { .cmd = MACSEC_CMD_UPD_RXSA, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = macsec_upd_rxsa, - .policy = macsec_genl_policy, + .flags = GENL_ADMIN_PERM, + }, + { + .cmd = MACSEC_CMD_UPD_OFFLOAD, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, + .doit = macsec_upd_offload, .flags = GENL_ADMIN_PERM, }, }; @@ -2700,12 +3381,48 @@ static struct genl_family macsec_fam __ro_after_init = { .hdrsize = 0, .version = MACSEC_GENL_VERSION, .maxattr = MACSEC_ATTR_MAX, + .policy = macsec_genl_policy, .netnsok = true, .module = THIS_MODULE, - .ops = macsec_genl_ops, - .n_ops = ARRAY_SIZE(macsec_genl_ops), + .small_ops = macsec_genl_ops, + .n_small_ops = ARRAY_SIZE(macsec_genl_ops), + .resv_start_op = MACSEC_CMD_UPD_OFFLOAD + 1, }; +static struct sk_buff *macsec_insert_tx_tag(struct sk_buff *skb, + struct net_device *dev) +{ + struct macsec_dev *macsec = macsec_priv(dev); + const struct macsec_ops *ops; + struct phy_device *phydev; + struct macsec_context ctx; + int skb_final_len; + int err; + 
+ ops = macsec_get_ops(macsec, &ctx); + skb_final_len = skb->len - ETH_HLEN + ops->needed_headroom + + ops->needed_tailroom; + if (unlikely(skb_final_len > macsec->real_dev->mtu)) { + err = -EINVAL; + goto cleanup; + } + + phydev = macsec->real_dev->phydev; + + err = skb_ensure_writable_head_tail(skb, dev); + if (unlikely(err < 0)) + goto cleanup; + + err = ops->mdo_insert_tx_tag(phydev, skb); + if (unlikely(err)) + goto cleanup; + + return skb; +cleanup: + kfree_skb(skb); + return ERR_PTR(err); +} + static netdev_tx_t macsec_start_xmit(struct sk_buff *skb, struct net_device *dev) { @@ -2714,6 +3431,25 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb, struct pcpu_secy_stats *secy_stats; int ret, len; + if (macsec_is_offloaded(netdev_priv(dev))) { + struct metadata_dst *md_dst = secy->tx_sc.md_dst; + + skb_dst_drop(skb); + dst_hold(&md_dst->dst); + skb_dst_set(skb, &md_dst->dst); + + if (macsec->insert_tx_tag) { + skb = macsec_insert_tx_tag(skb, dev); + if (IS_ERR(skb)) { + DEV_STATS_INC(dev, tx_dropped); + return NETDEV_TX_OK; + } + } + + skb->dev = macsec->real_dev; + return dev_queue_xmit(skb); + } + /* 10.5 */ if (!secy->protect_frames) { secy_stats = this_cpu_ptr(macsec->stats); @@ -2729,21 +3465,21 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb, if (!secy->operational) { kfree_skb(skb); - dev->stats.tx_dropped++; + DEV_STATS_INC(dev, tx_dropped); return NETDEV_TX_OK; } + len = skb->len; skb = macsec_encrypt(skb, dev); if (IS_ERR(skb)) { if (PTR_ERR(skb) != -EINPROGRESS) - dev->stats.tx_dropped++; + DEV_STATS_INC(dev, tx_dropped); return NETDEV_TX_OK; } macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa); macsec_encrypt_finish(skb, dev); - len = skb->len; ret = dev_queue_xmit(skb); count_tx(dev, ret, len); return ret; @@ -2751,7 +3487,10 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb, #define MACSEC_FEATURES \ (NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST) -static struct lock_class_key macsec_netdev_addr_lock_key; + +#define MACSEC_OFFLOAD_FEATURES \ + (MACSEC_FEATURES | NETIF_F_GSO_SOFTWARE | NETIF_F_SOFT_FEATURES | \ + NETIF_F_LRO | NETIF_F_RXHASH | NETIF_F_CSUM_MASK | NETIF_F_RXCSUM) static int macsec_dev_init(struct net_device *dev) { @@ -2759,29 +3498,30 @@ static int macsec_dev_init(struct net_device *dev) struct net_device *real_dev = macsec->real_dev; int err; - dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); - if (!dev->tstats) - return -ENOMEM; - err = gro_cells_init(&macsec->gro_cells, dev); - if (err) { - free_percpu(dev->tstats); + if (err) return err; - } - dev->features = real_dev->features & MACSEC_FEATURES; - dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE; + macsec_inherit_tso_max(dev); + + dev->hw_features = real_dev->hw_features & MACSEC_OFFLOAD_FEATURES; + dev->hw_features |= NETIF_F_GSO_SOFTWARE; + + dev->features = real_dev->features & MACSEC_OFFLOAD_FEATURES; + dev->features |= NETIF_F_GSO_SOFTWARE; + dev->lltx = true; + dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS; - dev->needed_headroom = real_dev->needed_headroom + - MACSEC_NEEDED_HEADROOM; - dev->needed_tailroom = real_dev->needed_tailroom + - MACSEC_NEEDED_TAILROOM; + macsec_set_head_tail_room(dev); if (is_zero_ether_addr(dev->dev_addr)) eth_hw_addr_inherit(dev, real_dev); if (is_zero_ether_addr(dev->broadcast)) memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len); + /* Get macsec's reference to real_dev */ + netdev_hold(real_dev, &macsec->dev_tracker, GFP_KERNEL); + return 0; } @@ -2790,7 +3530,6 @@ static void 
macsec_dev_uninit(struct net_device *dev) struct macsec_dev *macsec = macsec_priv(dev); gro_cells_destroy(&macsec->gro_cells); - free_percpu(dev->tstats); } static netdev_features_t macsec_fix_features(struct net_device *dev, @@ -2798,10 +3537,13 @@ static netdev_features_t macsec_fix_features(struct net_device *dev, { struct macsec_dev *macsec = macsec_priv(dev); struct net_device *real_dev = macsec->real_dev; + netdev_features_t mask; + + mask = macsec_is_offloaded(macsec) ? MACSEC_OFFLOAD_FEATURES + : MACSEC_FEATURES; - features &= (real_dev->features & MACSEC_FEATURES) | + features &= (real_dev->features & mask) | NETIF_F_GSO_SOFTWARE | NETIF_F_SOFT_FEATURES; - features |= NETIF_F_LLTX; return features; } @@ -2828,6 +3570,23 @@ static int macsec_dev_open(struct net_device *dev) goto clear_allmulti; } + /* If h/w offloading is available, propagate to the device */ + if (macsec_is_offloaded(macsec)) { + const struct macsec_ops *ops; + struct macsec_context ctx; + + ops = macsec_get_ops(netdev_priv(dev), &ctx); + if (!ops) { + err = -EOPNOTSUPP; + goto clear_allmulti; + } + + ctx.secy = &macsec->secy; + err = macsec_offload(ops->mdo_dev_open, &ctx); + if (err) + goto clear_allmulti; + } + if (netif_carrier_ok(real_dev)) netif_carrier_on(dev); @@ -2848,6 +3607,18 @@ static int macsec_dev_stop(struct net_device *dev) netif_carrier_off(dev); + /* If h/w offloading is available, propagate to the device */ + if (macsec_is_offloaded(macsec)) { + const struct macsec_ops *ops; + struct macsec_context ctx; + + ops = macsec_get_ops(macsec, &ctx); + if (ops) { + ctx.secy = &macsec->secy; + macsec_offload(ops->mdo_dev_stop, &ctx); + } + } + dev_mc_unsync(real_dev, dev); dev_uc_unsync(real_dev, dev); @@ -2890,23 +3661,50 @@ static int macsec_set_mac_address(struct net_device *dev, void *p) struct macsec_dev *macsec = macsec_priv(dev); struct net_device *real_dev = macsec->real_dev; struct sockaddr *addr = p; + u8 old_addr[ETH_ALEN]; int err; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - if (!(dev->flags & IFF_UP)) - goto out; + if (dev->flags & IFF_UP) { + err = dev_uc_add(real_dev, addr->sa_data); + if (err < 0) + return err; + } - err = dev_uc_add(real_dev, addr->sa_data); - if (err < 0) - return err; + ether_addr_copy(old_addr, dev->dev_addr); + eth_hw_addr_set(dev, addr->sa_data); - dev_uc_del(real_dev, dev->dev_addr); + /* If h/w offloading is available, propagate to the device */ + if (macsec_is_offloaded(macsec)) { + const struct macsec_ops *ops; + struct macsec_context ctx; + + ops = macsec_get_ops(macsec, &ctx); + if (!ops) { + err = -EOPNOTSUPP; + goto restore_old_addr; + } + + ctx.secy = &macsec->secy; + err = macsec_offload(ops->mdo_upd_secy, &ctx); + if (err) + goto restore_old_addr; + } + + if (dev->flags & IFF_UP) + dev_uc_del(real_dev, old_addr); -out: - ether_addr_copy(dev->dev_addr, addr->sa_data); return 0; + +restore_old_addr: + if (dev->flags & IFF_UP) + dev_uc_del(real_dev, addr->sa_data); + + eth_hw_addr_set(dev, old_addr); + + return err; } static int macsec_change_mtu(struct net_device *dev, int new_mtu) @@ -2917,7 +3715,7 @@ static int macsec_change_mtu(struct net_device *dev, int new_mtu) if (macsec->real_dev->mtu - extra < new_mtu) return -ERANGE; - dev->mtu = new_mtu; + WRITE_ONCE(dev->mtu, new_mtu); return 0; } @@ -2925,43 +3723,19 @@ static int macsec_change_mtu(struct net_device *dev, int new_mtu) static void macsec_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *s) { - int cpu; - if (!dev->tstats) return; - for_each_possible_cpu(cpu) { 
- struct pcpu_sw_netstats *stats; - struct pcpu_sw_netstats tmp; - int start; - - stats = per_cpu_ptr(dev->tstats, cpu); - do { - start = u64_stats_fetch_begin_irq(&stats->syncp); - tmp.rx_packets = stats->rx_packets; - tmp.rx_bytes = stats->rx_bytes; - tmp.tx_packets = stats->tx_packets; - tmp.tx_bytes = stats->tx_bytes; - } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); + dev_fetch_sw_netstats(s, dev->tstats); - s->rx_packets += tmp.rx_packets; - s->rx_bytes += tmp.rx_bytes; - s->tx_packets += tmp.tx_packets; - s->tx_bytes += tmp.tx_bytes; - } - - s->rx_dropped = dev->stats.rx_dropped; - s->tx_dropped = dev->stats.tx_dropped; + s->rx_dropped = DEV_STATS_READ(dev, rx_dropped); + s->tx_dropped = DEV_STATS_READ(dev, tx_dropped); + s->rx_errors = DEV_STATS_READ(dev, rx_errors); } static int macsec_get_iflink(const struct net_device *dev) { - return macsec_priv(dev)->real_dev->ifindex; -} - -static int macsec_get_nest_level(struct net_device *dev) -{ - return macsec_priv(dev)->nest_level; + return READ_ONCE(macsec_priv(dev)->real_dev->ifindex); } static const struct net_device_ops macsec_netdev_ops = { @@ -2977,37 +3751,41 @@ static const struct net_device_ops macsec_netdev_ops = { .ndo_start_xmit = macsec_start_xmit, .ndo_get_stats64 = macsec_get_stats64, .ndo_get_iflink = macsec_get_iflink, - .ndo_get_lock_subclass = macsec_get_nest_level, }; static const struct device_type macsec_type = { .name = "macsec", }; +static int validate_cipher_suite(const struct nlattr *attr, + struct netlink_ext_ack *extack); static const struct nla_policy macsec_rtnl_policy[IFLA_MACSEC_MAX + 1] = { [IFLA_MACSEC_SCI] = { .type = NLA_U64 }, - [IFLA_MACSEC_ICV_LEN] = { .type = NLA_U8 }, - [IFLA_MACSEC_CIPHER_SUITE] = { .type = NLA_U64 }, + [IFLA_MACSEC_PORT] = { .type = NLA_U16 }, + [IFLA_MACSEC_ICV_LEN] = NLA_POLICY_RANGE(NLA_U8, MACSEC_MIN_ICV_LEN, MACSEC_STD_ICV_LEN), + [IFLA_MACSEC_CIPHER_SUITE] = NLA_POLICY_VALIDATE_FN(NLA_U64, validate_cipher_suite), [IFLA_MACSEC_WINDOW] = { .type = NLA_U32 }, - [IFLA_MACSEC_ENCODING_SA] = { .type = NLA_U8 }, - [IFLA_MACSEC_ENCRYPT] = { .type = NLA_U8 }, - [IFLA_MACSEC_PROTECT] = { .type = NLA_U8 }, - [IFLA_MACSEC_INC_SCI] = { .type = NLA_U8 }, - [IFLA_MACSEC_ES] = { .type = NLA_U8 }, - [IFLA_MACSEC_SCB] = { .type = NLA_U8 }, - [IFLA_MACSEC_REPLAY_PROTECT] = { .type = NLA_U8 }, - [IFLA_MACSEC_VALIDATION] = { .type = NLA_U8 }, + [IFLA_MACSEC_ENCODING_SA] = NLA_POLICY_MAX(NLA_U8, MACSEC_NUM_AN - 1), + [IFLA_MACSEC_ENCRYPT] = NLA_POLICY_MAX(NLA_U8, 1), + [IFLA_MACSEC_PROTECT] = NLA_POLICY_MAX(NLA_U8, 1), + [IFLA_MACSEC_INC_SCI] = NLA_POLICY_MAX(NLA_U8, 1), + [IFLA_MACSEC_ES] = NLA_POLICY_MAX(NLA_U8, 1), + [IFLA_MACSEC_SCB] = NLA_POLICY_MAX(NLA_U8, 1), + [IFLA_MACSEC_REPLAY_PROTECT] = NLA_POLICY_MAX(NLA_U8, 1), + [IFLA_MACSEC_VALIDATION] = NLA_POLICY_MAX(NLA_U8, MACSEC_VALIDATE_MAX), + [IFLA_MACSEC_OFFLOAD] = NLA_POLICY_MAX(NLA_U8, MACSEC_OFFLOAD_MAX), }; static void macsec_free_netdev(struct net_device *dev) { struct macsec_dev *macsec = macsec_priv(dev); - struct net_device *real_dev = macsec->real_dev; + dst_release(&macsec->secy.tx_sc.md_dst->dst); free_percpu(macsec->stats); free_percpu(macsec->secy.tx_sc.stats); - dev_put(real_dev); + /* Get rid of the macsec's reference to real_dev */ + netdev_put(macsec->real_dev, &macsec->dev_tracker); } static void macsec_setup(struct net_device *dev) @@ -3015,7 +3793,7 @@ static void macsec_setup(struct net_device *dev) ether_setup(dev); dev->min_mtu = 0; dev->max_mtu = ETH_MAX_MTU; - dev->priv_flags |= IFF_NO_QUEUE; + 
dev->priv_flags |= IFF_NO_QUEUE | IFF_UNICAST_FLT; dev->netdev_ops = &macsec_netdev_ops; dev->needs_free_netdev = true; dev->priv_destructor = macsec_free_netdev; @@ -3042,9 +3820,6 @@ static int macsec_changelink_common(struct net_device *dev, secy->operational = tx_sa && tx_sa->active; } - if (data[IFLA_MACSEC_WINDOW]) - secy->replay_window = nla_get_u32(data[IFLA_MACSEC_WINDOW]); - if (data[IFLA_MACSEC_ENCRYPT]) tx_sc->encrypt = !!nla_get_u8(data[IFLA_MACSEC_ENCRYPT]); @@ -3071,15 +3846,35 @@ static int macsec_changelink_common(struct net_device *dev, case MACSEC_CIPHER_ID_GCM_AES_128: case MACSEC_DEFAULT_CIPHER_ID: secy->key_len = MACSEC_GCM_AES_128_SAK_LEN; + secy->xpn = false; break; case MACSEC_CIPHER_ID_GCM_AES_256: secy->key_len = MACSEC_GCM_AES_256_SAK_LEN; + secy->xpn = false; + break; + case MACSEC_CIPHER_ID_GCM_AES_XPN_128: + secy->key_len = MACSEC_GCM_AES_128_SAK_LEN; + secy->xpn = true; + break; + case MACSEC_CIPHER_ID_GCM_AES_XPN_256: + secy->key_len = MACSEC_GCM_AES_256_SAK_LEN; + secy->xpn = true; break; default: return -EINVAL; } } + if (data[IFLA_MACSEC_WINDOW]) { + secy->replay_window = nla_get_u32(data[IFLA_MACSEC_WINDOW]); + + /* IEEE 802.1AEbw-2013 10.7.8 - maximum replay window + * for XPN cipher suites */ + if (secy->xpn && + secy->replay_window > MACSEC_XPN_MAX_REPLAY_WINDOW) + return -EINVAL; + } + return 0; } @@ -3087,6 +3882,13 @@ static int macsec_changelink(struct net_device *dev, struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) { + struct macsec_dev *macsec = macsec_priv(dev); + bool macsec_offload_state_change = false; + enum macsec_offload offload; + struct macsec_tx_sc tx_sc; + struct macsec_secy secy; + int ret; + if (!data) return 0; @@ -3096,7 +3898,50 @@ static int macsec_changelink(struct net_device *dev, struct nlattr *tb[], data[IFLA_MACSEC_PORT]) return -EINVAL; - return macsec_changelink_common(dev, data); + /* Keep a copy of unmodified secy and tx_sc, in case the offload + * propagation fails, to revert macsec_changelink_common. 
+ */ + memcpy(&secy, &macsec->secy, sizeof(secy)); + memcpy(&tx_sc, &macsec->secy.tx_sc, sizeof(tx_sc)); + + ret = macsec_changelink_common(dev, data); + if (ret) + goto cleanup; + + if (data[IFLA_MACSEC_OFFLOAD]) { + offload = nla_get_u8(data[IFLA_MACSEC_OFFLOAD]); + if (macsec->offload != offload) { + macsec_offload_state_change = true; + ret = macsec_update_offload(dev, offload); + if (ret) + goto cleanup; + } + } + + /* If h/w offloading is available, propagate to the device */ + if (!macsec_offload_state_change && macsec_is_offloaded(macsec)) { + const struct macsec_ops *ops; + struct macsec_context ctx; + + ops = macsec_get_ops(netdev_priv(dev), &ctx); + if (!ops) { + ret = -EOPNOTSUPP; + goto cleanup; + } + + ctx.secy = &macsec->secy; + ret = macsec_offload(ops->mdo_upd_secy, &ctx); + if (ret) + goto cleanup; + } + + return 0; + +cleanup: + memcpy(&macsec->secy.tx_sc, &tx_sc, sizeof(tx_sc)); + memcpy(&macsec->secy, &secy, sizeof(secy)); + + return ret; } static void macsec_del_dev(struct macsec_dev *macsec) @@ -3125,6 +3970,18 @@ static void macsec_common_dellink(struct net_device *dev, struct list_head *head struct macsec_dev *macsec = macsec_priv(dev); struct net_device *real_dev = macsec->real_dev; + /* If h/w offloading is available, propagate to the device */ + if (macsec_is_offloaded(macsec)) { + const struct macsec_ops *ops; + struct macsec_context ctx; + + ops = macsec_get_ops(netdev_priv(dev), &ctx); + if (ops) { + ctx.secy = &macsec->secy; + macsec_offload(ops->mdo_del_secy, &ctx); + } + } + unregister_netdevice_queue(dev, head); list_del_rcu(&macsec->secys); macsec_del_dev(macsec); @@ -3202,10 +4059,15 @@ static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len) return -ENOMEM; secy->tx_sc.stats = netdev_alloc_pcpu_stats(struct pcpu_tx_sc_stats); - if (!secy->tx_sc.stats) { - free_percpu(macsec->stats); + if (!secy->tx_sc.stats) + return -ENOMEM; + + secy->tx_sc.md_dst = metadata_dst_alloc(0, METADATA_MACSEC, GFP_KERNEL); + if (!secy->tx_sc.md_dst) + /* macsec and secy percpu stats will be freed when unregistering + * net_device in macsec_free_netdev() + */ return -ENOMEM; - } if (sci == MACSEC_UNDEF_SCI) sci = dev_to_sci(dev, MACSEC_PORT_ES); @@ -3217,8 +4079,10 @@ static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len) secy->validate_frames = MACSEC_VALIDATE_DEFAULT; secy->protect_frames = true; secy->replay_protect = false; + secy->xpn = DEFAULT_XPN; secy->sci = sci; + secy->tx_sc.md_dst->u.macsec_info.sci = sci; secy->tx_sc.active = true; secy->tx_sc.encoding_sa = DEFAULT_ENCODING_SA; secy->tx_sc.encrypt = DEFAULT_ENCRYPT; @@ -3229,30 +4093,61 @@ static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len) return 0; } -static int macsec_newlink(struct net *net, struct net_device *dev, - struct nlattr *tb[], struct nlattr *data[], +static struct lock_class_key macsec_netdev_addr_lock_key; + +static int macsec_newlink(struct net_device *dev, + struct rtnl_newlink_params *params, struct netlink_ext_ack *extack) { + struct net *link_net = rtnl_newlink_link_net(params); struct macsec_dev *macsec = macsec_priv(dev); + struct nlattr **data = params->data; + struct nlattr **tb = params->tb; + rx_handler_func_t *rx_handler; + u8 icv_len = MACSEC_DEFAULT_ICV_LEN; struct net_device *real_dev; - int err; + int err, mtu; sci_t sci; - u8 icv_len = DEFAULT_ICV_LEN; - rx_handler_func_t *rx_handler; if (!tb[IFLA_LINK]) return -EINVAL; - real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK])); + real_dev = __dev_get_by_index(link_net, 
nla_get_u32(tb[IFLA_LINK])); if (!real_dev) return -ENODEV; + if (real_dev->type != ARPHRD_ETHER) + return -EINVAL; dev->priv_flags |= IFF_MACSEC; macsec->real_dev = real_dev; + if (data && data[IFLA_MACSEC_OFFLOAD]) + macsec->offload = nla_get_offload(data[IFLA_MACSEC_OFFLOAD]); + else + /* MACsec offloading is off by default */ + macsec->offload = MACSEC_OFFLOAD_OFF; + + /* Check if the offloading mode is supported by the underlying layers */ + if (macsec->offload != MACSEC_OFFLOAD_OFF && + !macsec_check_offload(macsec->offload, macsec)) + return -EOPNOTSUPP; + + /* send_sci must be set to true when transmit sci explicitly is set */ + if ((data && data[IFLA_MACSEC_SCI]) && + (data && data[IFLA_MACSEC_INC_SCI])) { + u8 send_sci = !!nla_get_u8(data[IFLA_MACSEC_INC_SCI]); + + if (!send_sci) + return -EINVAL; + } + if (data && data[IFLA_MACSEC_ICV_LEN]) icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]); - dev->mtu = real_dev->mtu - icv_len - macsec_extra_len(true); + mtu = real_dev->mtu - icv_len - macsec_extra_len(true); + if (mtu < 0) + dev->mtu = 0; + else + dev->mtu = mtu; rx_handler = rtnl_dereference(real_dev->rx_handler); if (rx_handler && rx_handler != macsec_handle_frame) @@ -3262,13 +4157,9 @@ static int macsec_newlink(struct net *net, struct net_device *dev, if (err < 0) return err; - dev_hold(real_dev); - - macsec->nest_level = dev_get_nest_level(real_dev) + 1; netdev_lockdep_set_classes(dev); - lockdep_set_class_and_subclass(&dev->addr_list_lock, - &macsec_netdev_addr_lock_key, - macsec_get_nest_level(dev)); + lockdep_set_class(&dev->addr_list_lock, + &macsec_netdev_addr_lock_key); err = netdev_upper_dev_link(real_dev, dev, extack); if (err < 0) @@ -3299,10 +4190,28 @@ static int macsec_newlink(struct net *net, struct net_device *dev, goto del_dev; } + /* If h/w offloading is available, propagate to the device */ + if (macsec_is_offloaded(macsec)) { + const struct macsec_ops *ops; + struct macsec_context ctx; + + ops = macsec_get_ops(macsec, &ctx); + if (ops) { + ctx.secy = &macsec->secy; + err = macsec_offload(ops->mdo_add_secy, &ctx); + if (err) + goto del_dev; + + macsec->insert_tx_tag = + macsec_needs_tx_tag(macsec, ops); + } + } + err = register_macsec_dev(real_dev, dev); if (err < 0) goto del_dev; + netdev_update_features(dev); netif_stacked_transfer_operstate(real_dev, dev); linkwatch_fire_event(dev); @@ -3319,23 +4228,33 @@ unregister: return err; } +static int validate_cipher_suite(const struct nlattr *attr, + struct netlink_ext_ack *extack) +{ + switch (nla_get_u64(attr)) { + case MACSEC_CIPHER_ID_GCM_AES_128: + case MACSEC_CIPHER_ID_GCM_AES_256: + case MACSEC_CIPHER_ID_GCM_AES_XPN_128: + case MACSEC_CIPHER_ID_GCM_AES_XPN_256: + case MACSEC_DEFAULT_CIPHER_ID: + return 0; + default: + return -EINVAL; + } +} + static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) { - u64 csid = MACSEC_DEFAULT_CIPHER_ID; - u8 icv_len = DEFAULT_ICV_LEN; - int flag; + u8 icv_len = MACSEC_DEFAULT_ICV_LEN; bool es, scb, sci; if (!data) return 0; - if (data[IFLA_MACSEC_CIPHER_SUITE]) - csid = nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE]); - if (data[IFLA_MACSEC_ICV_LEN]) { icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]); - if (icv_len != DEFAULT_ICV_LEN) { + if (icv_len != MACSEC_DEFAULT_ICV_LEN) { char dummy_key[DEFAULT_SAK_LEN] = { 0 }; struct crypto_aead *dummy_tfm; @@ -3348,43 +4267,13 @@ static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[], } } - switch (csid) { - case MACSEC_CIPHER_ID_GCM_AES_128: - case 
MACSEC_CIPHER_ID_GCM_AES_256: - case MACSEC_DEFAULT_CIPHER_ID: - if (icv_len < MACSEC_MIN_ICV_LEN || - icv_len > MACSEC_STD_ICV_LEN) - return -EINVAL; - break; - default: - return -EINVAL; - } - - if (data[IFLA_MACSEC_ENCODING_SA]) { - if (nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]) >= MACSEC_NUM_AN) - return -EINVAL; - } - - for (flag = IFLA_MACSEC_ENCODING_SA + 1; - flag < IFLA_MACSEC_VALIDATION; - flag++) { - if (data[flag]) { - if (nla_get_u8(data[flag]) > 1) - return -EINVAL; - } - } - - es = data[IFLA_MACSEC_ES] ? nla_get_u8(data[IFLA_MACSEC_ES]) : false; - sci = data[IFLA_MACSEC_INC_SCI] ? nla_get_u8(data[IFLA_MACSEC_INC_SCI]) : false; - scb = data[IFLA_MACSEC_SCB] ? nla_get_u8(data[IFLA_MACSEC_SCB]) : false; + es = nla_get_u8_default(data[IFLA_MACSEC_ES], false); + sci = nla_get_u8_default(data[IFLA_MACSEC_INC_SCI], false); + scb = nla_get_u8_default(data[IFLA_MACSEC_SCB], false); if ((sci && (scb || es)) || (scb && es)) return -EINVAL; - if (data[IFLA_MACSEC_VALIDATION] && - nla_get_u8(data[IFLA_MACSEC_VALIDATION]) > MACSEC_VALIDATE_MAX) - return -EINVAL; - if ((data[IFLA_MACSEC_REPLAY_PROTECT] && nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT])) && !data[IFLA_MACSEC_WINDOW]) @@ -3398,6 +4287,18 @@ static struct net *macsec_get_link_net(const struct net_device *dev) return dev_net(macsec_priv(dev)->real_dev); } +struct net_device *macsec_get_real_dev(const struct net_device *dev) +{ + return macsec_priv(dev)->real_dev; +} +EXPORT_SYMBOL_GPL(macsec_get_real_dev); + +bool macsec_netdev_is_offloaded(struct net_device *dev) +{ + return macsec_is_offloaded(macsec_priv(dev)); +} +EXPORT_SYMBOL_GPL(macsec_netdev_is_offloaded); + static size_t macsec_get_size(const struct net_device *dev) { return nla_total_size_64bit(8) + /* IFLA_MACSEC_SCI */ @@ -3412,22 +4313,28 @@ static size_t macsec_get_size(const struct net_device *dev) nla_total_size(1) + /* IFLA_MACSEC_SCB */ nla_total_size(1) + /* IFLA_MACSEC_REPLAY_PROTECT */ nla_total_size(1) + /* IFLA_MACSEC_VALIDATION */ + nla_total_size(1) + /* IFLA_MACSEC_OFFLOAD */ 0; } static int macsec_fill_info(struct sk_buff *skb, const struct net_device *dev) { - struct macsec_secy *secy = &macsec_priv(dev)->secy; - struct macsec_tx_sc *tx_sc = &secy->tx_sc; + struct macsec_tx_sc *tx_sc; + struct macsec_dev *macsec; + struct macsec_secy *secy; u64 csid; + macsec = macsec_priv(dev); + secy = &macsec->secy; + tx_sc = &secy->tx_sc; + switch (secy->key_len) { case MACSEC_GCM_AES_128_SAK_LEN: - csid = MACSEC_DEFAULT_CIPHER_ID; + csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_128 : MACSEC_DEFAULT_CIPHER_ID; break; case MACSEC_GCM_AES_256_SAK_LEN: - csid = MACSEC_CIPHER_ID_GCM_AES_256; + csid = secy->xpn ? 
MACSEC_CIPHER_ID_GCM_AES_XPN_256 : MACSEC_CIPHER_ID_GCM_AES_256; break; default: goto nla_put_failure; @@ -3446,6 +4353,7 @@ static int macsec_fill_info(struct sk_buff *skb, nla_put_u8(skb, IFLA_MACSEC_SCB, tx_sc->scb) || nla_put_u8(skb, IFLA_MACSEC_REPLAY_PROTECT, secy->replay_protect) || nla_put_u8(skb, IFLA_MACSEC_VALIDATION, secy->validate_frames) || + nla_put_u8(skb, IFLA_MACSEC_OFFLOAD, macsec->offload) || 0) goto nla_put_failure; @@ -3484,31 +4392,26 @@ static int macsec_notify(struct notifier_block *this, unsigned long event, void *ptr) { struct net_device *real_dev = netdev_notifier_info_to_dev(ptr); + struct macsec_rxh_data *rxd; + struct macsec_dev *m, *n; LIST_HEAD(head); if (!is_macsec_master(real_dev)) return NOTIFY_DONE; + rxd = macsec_data_rtnl(real_dev); + switch (event) { case NETDEV_DOWN: case NETDEV_UP: - case NETDEV_CHANGE: { - struct macsec_dev *m, *n; - struct macsec_rxh_data *rxd; - - rxd = macsec_data_rtnl(real_dev); + case NETDEV_CHANGE: list_for_each_entry_safe(m, n, &rxd->secys, secys) { struct net_device *dev = m->secy.netdev; netif_stacked_transfer_operstate(real_dev, dev); } break; - } - case NETDEV_UNREGISTER: { - struct macsec_dev *m, *n; - struct macsec_rxh_data *rxd; - - rxd = macsec_data_rtnl(real_dev); + case NETDEV_UNREGISTER: list_for_each_entry_safe(m, n, &rxd->secys, secys) { macsec_common_dellink(m->secy.netdev, &head); } @@ -3518,12 +4421,7 @@ static int macsec_notify(struct notifier_block *this, unsigned long event, unregister_netdevice_many(&head); break; - } - case NETDEV_CHANGEMTU: { - struct macsec_dev *m; - struct macsec_rxh_data *rxd; - - rxd = macsec_data_rtnl(real_dev); + case NETDEV_CHANGEMTU: list_for_each_entry(m, &rxd->secys, secys) { struct net_device *dev = m->secy.netdev; unsigned int mtu = real_dev->mtu - (m->secy.icv_len + @@ -3532,7 +4430,13 @@ static int macsec_notify(struct notifier_block *this, unsigned long event, if (dev->mtu > mtu) dev_set_mtu(dev, mtu); } - } + break; + case NETDEV_FEAT_CHANGE: + list_for_each_entry(m, &rxd->secys, secys) { + macsec_inherit_tso_max(m->secy.netdev); + netdev_update_features(m->secy.netdev); + } + break; } return NOTIFY_OK; |
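Both macsec_newlink() and the NETDEV_CHANGEMTU case above derive the macsec device's MTU from the lower device by subtracting the SecTAG and ICV overhead. A worked sketch of that arithmetic, assuming macsec_extra_len(true) accounts for the 6-byte SecTAG, the 8-byte SCI and the 2-byte EtherType that ends up inside the secure data (that 6/8/2 breakdown is an assumption, not spelled out in this hunk):

/* Rough model of the per-frame MACsec overhead behind the MTU checks. */
static int example_macsec_mtu(int real_mtu, bool send_sci, u8 icv_len)
{
	int extra = 6 + (send_sci ? 8 : 0) + 2;

	return real_mtu - extra - icv_len;
}

With the defaults (SCI present, 16-byte ICV), example_macsec_mtu(1500, true, 16) evaluates to 1468, matching the clamping applied to stacked macsec devices when the lower device's MTU changes.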

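dump_secy() above leans heavily on one netlink idiom: open a nested attribute, emit its members, and cancel the whole nest if any put fails so the partially built message is rolled back before returning -EMSGSIZE. A self-contained sketch of that idiom; the EXAMPLE_ATTR_* numbers are made up for illustration and are not MACsec attributes:

#include <net/netlink.h>

enum { EXAMPLE_ATTR_UNSPEC, EXAMPLE_ATTR_NEST, EXAMPLE_ATTR_VALUE };

static int example_put_nest(struct sk_buff *skb, u32 value)
{
	struct nlattr *nest;

	nest = nla_nest_start_noflag(skb, EXAMPLE_ATTR_NEST);
	if (!nest)
		return -EMSGSIZE;

	if (nla_put_u32(skb, EXAMPLE_ATTR_VALUE, value)) {
		/* roll the message back to its state before nla_nest_start_noflag() */
		nla_nest_cancel(skb, nest);
		return -EMSGSIZE;
	}

	nla_nest_end(skb, nest);
	return 0;
}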