Diffstat (limited to 'net/ethtool'): 41 files changed, 19626 insertions, 0 deletions
diff --git a/net/ethtool/Makefile b/net/ethtool/Makefile new file mode 100644 index 000000000000..629c10916670 --- /dev/null +++ b/net/ethtool/Makefile @@ -0,0 +1,12 @@ +# SPDX-License-Identifier: GPL-2.0-only + +obj-y += ioctl.o common.o + +obj-$(CONFIG_ETHTOOL_NETLINK) += ethtool_nl.o + +ethtool_nl-y := netlink.o bitset.o strset.o linkinfo.o linkmodes.o rss.o \ + linkstate.o debug.o wol.o features.o privflags.o rings.o \ + channels.o coalesce.o pause.o eee.o tsinfo.o cabletest.o \ + tunnels.o fec.o eeprom.o stats.o phc_vclocks.o mm.o \ + module.o cmis_fw_update.o cmis_cdb.o pse-pd.o plca.o \ + phy.o tsconfig.o mse.o diff --git a/net/ethtool/bitset.c b/net/ethtool/bitset.c new file mode 100644 index 000000000000..f0883357d12e --- /dev/null +++ b/net/ethtool/bitset.c @@ -0,0 +1,873 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include <linux/ethtool_netlink.h> +#include <linux/bitmap.h> +#include "netlink.h" +#include "bitset.h" + +/* Some bitmaps are internally represented as an array of unsigned long, some + * as an array of u32 (some even as single u32 for now). To avoid the need of + * wrappers on caller side, we provide two set of functions: those with "32" + * suffix in their names expect u32 based bitmaps, those without it expect + * unsigned long bitmaps. + */ + +static u32 ethnl_lower_bits(unsigned int n) +{ + return ~(u32)0 >> (32 - n % 32); +} + +static u32 ethnl_upper_bits(unsigned int n) +{ + return ~(u32)0 << (n % 32); +} + +/** + * ethnl_bitmap32_clear() - Clear u32 based bitmap + * @dst: bitmap to clear + * @start: beginning of the interval + * @end: end of the interval + * @mod: set if bitmap was modified + * + * Clear @nbits bits of a bitmap with indices @start <= i < @end + */ +static void ethnl_bitmap32_clear(u32 *dst, unsigned int start, unsigned int end, + bool *mod) +{ + unsigned int start_word = start / 32; + unsigned int end_word = end / 32; + unsigned int i; + u32 mask; + + if (end <= start) + return; + + if (start % 32) { + mask = ethnl_upper_bits(start); + if (end_word == start_word) { + mask &= ethnl_lower_bits(end); + if (dst[start_word] & mask) { + dst[start_word] &= ~mask; + *mod = true; + } + return; + } + if (dst[start_word] & mask) { + dst[start_word] &= ~mask; + *mod = true; + } + start_word++; + } + + for (i = start_word; i < end_word; i++) { + if (dst[i]) { + dst[i] = 0; + *mod = true; + } + } + if (end % 32) { + mask = ethnl_lower_bits(end); + if (dst[end_word] & mask) { + dst[end_word] &= ~mask; + *mod = true; + } + } +} + +/** + * ethnl_bitmap32_not_zero() - Check if any bit is set in an interval + * @map: bitmap to test + * @start: beginning of the interval + * @end: end of the interval + * + * Return: true if there is non-zero bit with index @start <= i < @end, + * false if the whole interval is zero + */ +static bool ethnl_bitmap32_not_zero(const u32 *map, unsigned int start, + unsigned int end) +{ + unsigned int start_word = start / 32; + unsigned int end_word = end / 32; + u32 mask; + + if (end <= start) + return true; + + if (start % 32) { + mask = ethnl_upper_bits(start); + if (end_word == start_word) { + mask &= ethnl_lower_bits(end); + return map[start_word] & mask; + } + if (map[start_word] & mask) + return true; + start_word++; + } + + if (!memchr_inv(map + start_word, '\0', + (end_word - start_word) * sizeof(u32))) + return true; + if (end % 32 == 0) + return true; + return map[end_word] & ethnl_lower_bits(end); +} + +/** + * ethnl_bitmap32_update() - Modify u32 based bitmap according to value/mask + * pair + * @dst: bitmap to update 
+ * @nbits: bit size of the bitmap + * @value: values to set + * @mask: mask of bits to set + * @mod: set to true if bitmap is modified, preserve if not + * + * Set bits in @dst bitmap which are set in @mask to values from @value, leave + * the rest untouched. If destination bitmap was modified, set @mod to true, + * leave as it is if not. + */ +static void ethnl_bitmap32_update(u32 *dst, unsigned int nbits, + const u32 *value, const u32 *mask, bool *mod) +{ + while (nbits > 0) { + u32 real_mask = mask ? *mask : ~(u32)0; + u32 new_value; + + if (nbits < 32) + real_mask &= ethnl_lower_bits(nbits); + new_value = (*dst & ~real_mask) | (*value & real_mask); + if (new_value != *dst) { + *dst = new_value; + *mod = true; + } + + if (nbits <= 32) + break; + dst++; + nbits -= 32; + value++; + if (mask) + mask++; + } +} + +static bool ethnl_bitmap32_test_bit(const u32 *map, unsigned int index) +{ + return map[index / 32] & (1U << (index % 32)); +} + +/** + * ethnl_bitset32_size() - Calculate size of bitset nested attribute + * @val: value bitmap (u32 based) + * @mask: mask bitmap (u32 based, optional) + * @nbits: bit length of the bitset + * @names: array of bit names (optional) + * @compact: assume compact format for output + * + * Estimate length of netlink attribute composed by a later call to + * ethnl_put_bitset32() call with the same arguments. + * + * Return: negative error code or attribute length estimate + */ +int ethnl_bitset32_size(const u32 *val, const u32 *mask, unsigned int nbits, + ethnl_string_array_t names, bool compact) +{ + unsigned int len = 0; + + /* list flag */ + if (!mask) + len += nla_total_size(sizeof(u32)); + /* size */ + len += nla_total_size(sizeof(u32)); + + if (compact) { + unsigned int nwords = DIV_ROUND_UP(nbits, 32); + + /* value, mask */ + len += (mask ? 2 : 1) * nla_total_size(nwords * sizeof(u32)); + } else { + unsigned int bits_len = 0; + unsigned int bit_len, i; + + for (i = 0; i < nbits; i++) { + const char *name = names ? names[i] : NULL; + + if (!ethnl_bitmap32_test_bit(mask ?: val, i)) + continue; + /* index */ + bit_len = nla_total_size(sizeof(u32)); + /* name */ + if (name) + bit_len += ethnl_strz_size(name); + /* value */ + if (mask && ethnl_bitmap32_test_bit(val, i)) + bit_len += nla_total_size(0); + + /* bit nest */ + bits_len += nla_total_size(bit_len); + } + /* bits nest */ + len += nla_total_size(bits_len); + } + + /* outermost nest */ + return nla_total_size(len); +} + +/** + * ethnl_put_bitset32() - Put a bitset nest into a message + * @skb: skb with the message + * @attrtype: attribute type for the bitset nest + * @val: value bitmap (u32 based) + * @mask: mask bitmap (u32 based, optional) + * @nbits: bit length of the bitset + * @names: array of bit names (optional) + * @compact: use compact format for the output + * + * Compose a nested attribute representing a bitset. If @mask is null, simple + * bitmap (bit list) is created, if @mask is provided, represent a value/mask + * pair. Bit names are only used in verbose mode and when provided by calller. 
+ * + * Return: 0 on success, negative error value on error + */ +int ethnl_put_bitset32(struct sk_buff *skb, int attrtype, const u32 *val, + const u32 *mask, unsigned int nbits, + ethnl_string_array_t names, bool compact) +{ + struct nlattr *nest; + struct nlattr *attr; + + nest = nla_nest_start(skb, attrtype); + if (!nest) + return -EMSGSIZE; + + if (!mask && nla_put_flag(skb, ETHTOOL_A_BITSET_NOMASK)) + goto nla_put_failure; + if (nla_put_u32(skb, ETHTOOL_A_BITSET_SIZE, nbits)) + goto nla_put_failure; + if (compact) { + unsigned int nwords = DIV_ROUND_UP(nbits, 32); + unsigned int nbytes = nwords * sizeof(u32); + u32 *dst; + + attr = nla_reserve(skb, ETHTOOL_A_BITSET_VALUE, nbytes); + if (!attr) + goto nla_put_failure; + dst = nla_data(attr); + memcpy(dst, val, nbytes); + if (nbits % 32) + dst[nwords - 1] &= ethnl_lower_bits(nbits); + + if (mask) { + attr = nla_reserve(skb, ETHTOOL_A_BITSET_MASK, nbytes); + if (!attr) + goto nla_put_failure; + dst = nla_data(attr); + memcpy(dst, mask, nbytes); + if (nbits % 32) + dst[nwords - 1] &= ethnl_lower_bits(nbits); + } + } else { + struct nlattr *bits; + unsigned int i; + + bits = nla_nest_start(skb, ETHTOOL_A_BITSET_BITS); + if (!bits) + goto nla_put_failure; + for (i = 0; i < nbits; i++) { + const char *name = names ? names[i] : NULL; + + if (!ethnl_bitmap32_test_bit(mask ?: val, i)) + continue; + attr = nla_nest_start(skb, ETHTOOL_A_BITSET_BITS_BIT); + if (!attr) + goto nla_put_failure; + if (nla_put_u32(skb, ETHTOOL_A_BITSET_BIT_INDEX, i)) + goto nla_put_failure; + if (name && + ethnl_put_strz(skb, ETHTOOL_A_BITSET_BIT_NAME, name)) + goto nla_put_failure; + if (mask && ethnl_bitmap32_test_bit(val, i) && + nla_put_flag(skb, ETHTOOL_A_BITSET_BIT_VALUE)) + goto nla_put_failure; + nla_nest_end(skb, attr); + } + nla_nest_end(skb, bits); + } + + nla_nest_end(skb, nest); + return 0; + +nla_put_failure: + nla_nest_cancel(skb, nest); + return -EMSGSIZE; +} + +static const struct nla_policy bitset_policy[] = { + [ETHTOOL_A_BITSET_NOMASK] = { .type = NLA_FLAG }, + [ETHTOOL_A_BITSET_SIZE] = NLA_POLICY_MAX(NLA_U32, + ETHNL_MAX_BITSET_SIZE), + [ETHTOOL_A_BITSET_BITS] = { .type = NLA_NESTED }, + [ETHTOOL_A_BITSET_VALUE] = { .type = NLA_BINARY }, + [ETHTOOL_A_BITSET_MASK] = { .type = NLA_BINARY }, +}; + +static const struct nla_policy bit_policy[] = { + [ETHTOOL_A_BITSET_BIT_INDEX] = { .type = NLA_U32 }, + [ETHTOOL_A_BITSET_BIT_NAME] = { .type = NLA_NUL_STRING }, + [ETHTOOL_A_BITSET_BIT_VALUE] = { .type = NLA_FLAG }, +}; + +/** + * ethnl_bitset_is_compact() - check if bitset attribute represents a compact + * bitset + * @bitset: nested attribute representing a bitset + * @compact: pointer for return value + * + * Return: 0 on success, negative error code on failure + */ +int ethnl_bitset_is_compact(const struct nlattr *bitset, bool *compact) +{ + struct nlattr *tb[ARRAY_SIZE(bitset_policy)]; + int ret; + + ret = nla_parse_nested(tb, ARRAY_SIZE(bitset_policy) - 1, bitset, + bitset_policy, NULL); + if (ret < 0) + return ret; + + if (tb[ETHTOOL_A_BITSET_BITS]) { + if (tb[ETHTOOL_A_BITSET_VALUE] || tb[ETHTOOL_A_BITSET_MASK]) + return -EINVAL; + *compact = false; + return 0; + } + if (!tb[ETHTOOL_A_BITSET_SIZE] || !tb[ETHTOOL_A_BITSET_VALUE]) + return -EINVAL; + + *compact = true; + return 0; +} + +/** + * ethnl_name_to_idx() - look up string index for a name + * @names: array of ETH_GSTRING_LEN sized strings + * @n_names: number of strings in the array + * @name: name to look up + * + * Return: index of the string if found, -ENOENT if not found + */ +static int 
ethnl_name_to_idx(ethnl_string_array_t names, unsigned int n_names, + const char *name) +{ + unsigned int i; + + if (!names) + return -ENOENT; + + for (i = 0; i < n_names; i++) { + /* names[i] may not be null terminated */ + if (!strncmp(names[i], name, ETH_GSTRING_LEN) && + strlen(name) <= ETH_GSTRING_LEN) + return i; + } + + return -ENOENT; +} + +static int ethnl_parse_bit(unsigned int *index, bool *val, unsigned int nbits, + const struct nlattr *bit_attr, bool no_mask, + ethnl_string_array_t names, + struct netlink_ext_ack *extack) +{ + struct nlattr *tb[ARRAY_SIZE(bit_policy)]; + int ret, idx; + + ret = nla_parse_nested(tb, ARRAY_SIZE(bit_policy) - 1, bit_attr, + bit_policy, extack); + if (ret < 0) + return ret; + + if (tb[ETHTOOL_A_BITSET_BIT_INDEX]) { + const char *name; + + idx = nla_get_u32(tb[ETHTOOL_A_BITSET_BIT_INDEX]); + if (idx >= nbits) { + NL_SET_ERR_MSG_ATTR(extack, + tb[ETHTOOL_A_BITSET_BIT_INDEX], + "bit index too high"); + return -EOPNOTSUPP; + } + name = names ? names[idx] : NULL; + if (tb[ETHTOOL_A_BITSET_BIT_NAME] && name && + strncmp(nla_data(tb[ETHTOOL_A_BITSET_BIT_NAME]), name, + nla_len(tb[ETHTOOL_A_BITSET_BIT_NAME]))) { + NL_SET_ERR_MSG_ATTR(extack, bit_attr, + "bit index and name mismatch"); + return -EINVAL; + } + } else if (tb[ETHTOOL_A_BITSET_BIT_NAME]) { + idx = ethnl_name_to_idx(names, nbits, + nla_data(tb[ETHTOOL_A_BITSET_BIT_NAME])); + if (idx < 0) { + NL_SET_ERR_MSG_ATTR(extack, + tb[ETHTOOL_A_BITSET_BIT_NAME], + "bit name not found"); + return -EOPNOTSUPP; + } + } else { + NL_SET_ERR_MSG_ATTR(extack, bit_attr, + "neither bit index nor name specified"); + return -EINVAL; + } + + *index = idx; + *val = no_mask || tb[ETHTOOL_A_BITSET_BIT_VALUE]; + return 0; +} + +/** + * ethnl_bitmap32_equal() - Compare two bitmaps + * @map1: first bitmap + * @map2: second bitmap + * @nbits: bit size to compare + * + * Return: true if first @nbits are equal, false if not + */ +static bool ethnl_bitmap32_equal(const u32 *map1, const u32 *map2, + unsigned int nbits) +{ + if (memcmp(map1, map2, nbits / 32 * sizeof(u32))) + return false; + if (nbits % 32 == 0) + return true; + return !((map1[nbits / 32] ^ map2[nbits / 32]) & + ethnl_lower_bits(nbits % 32)); +} + +static int +ethnl_update_bitset32_verbose(u32 *bitmap, unsigned int nbits, + const struct nlattr *attr, struct nlattr **tb, + ethnl_string_array_t names, + struct netlink_ext_ack *extack, bool *mod) +{ + u32 *saved_bitmap = NULL; + struct nlattr *bit_attr; + bool no_mask; + int rem; + int ret; + + if (tb[ETHTOOL_A_BITSET_VALUE]) { + NL_SET_ERR_MSG_ATTR(extack, tb[ETHTOOL_A_BITSET_VALUE], + "value only allowed in compact bitset"); + return -EINVAL; + } + if (tb[ETHTOOL_A_BITSET_MASK]) { + NL_SET_ERR_MSG_ATTR(extack, tb[ETHTOOL_A_BITSET_MASK], + "mask only allowed in compact bitset"); + return -EINVAL; + } + + no_mask = tb[ETHTOOL_A_BITSET_NOMASK]; + if (no_mask) { + unsigned int nwords = DIV_ROUND_UP(nbits, 32); + unsigned int nbytes = nwords * sizeof(u32); + bool dummy; + + /* The bitmap size is only the size of the map part without + * its mask part. 
+ */ + saved_bitmap = kcalloc(nwords, sizeof(u32), GFP_KERNEL); + if (!saved_bitmap) + return -ENOMEM; + memcpy(saved_bitmap, bitmap, nbytes); + ethnl_bitmap32_clear(bitmap, 0, nbits, &dummy); + } + + nla_for_each_nested(bit_attr, tb[ETHTOOL_A_BITSET_BITS], rem) { + bool old_val, new_val; + unsigned int idx; + + if (nla_type(bit_attr) != ETHTOOL_A_BITSET_BITS_BIT) { + NL_SET_ERR_MSG_ATTR(extack, bit_attr, + "only ETHTOOL_A_BITSET_BITS_BIT allowed in ETHTOOL_A_BITSET_BITS"); + kfree(saved_bitmap); + return -EINVAL; + } + ret = ethnl_parse_bit(&idx, &new_val, nbits, bit_attr, no_mask, + names, extack); + if (ret < 0) { + kfree(saved_bitmap); + return ret; + } + old_val = bitmap[idx / 32] & ((u32)1 << (idx % 32)); + if (new_val != old_val) { + if (new_val) + bitmap[idx / 32] |= ((u32)1 << (idx % 32)); + else + bitmap[idx / 32] &= ~((u32)1 << (idx % 32)); + if (!no_mask) + *mod = true; + } + } + + if (no_mask && !ethnl_bitmap32_equal(saved_bitmap, bitmap, nbits)) + *mod = true; + + kfree(saved_bitmap); + return 0; +} + +static int ethnl_compact_sanity_checks(unsigned int nbits, + const struct nlattr *nest, + struct nlattr **tb, + struct netlink_ext_ack *extack) +{ + bool no_mask = tb[ETHTOOL_A_BITSET_NOMASK]; + unsigned int attr_nbits, attr_nwords; + const struct nlattr *test_attr; + + if (no_mask && tb[ETHTOOL_A_BITSET_MASK]) { + NL_SET_ERR_MSG_ATTR(extack, tb[ETHTOOL_A_BITSET_MASK], + "mask not allowed in list bitset"); + return -EINVAL; + } + if (!tb[ETHTOOL_A_BITSET_SIZE]) { + NL_SET_ERR_MSG_ATTR(extack, nest, + "missing size in compact bitset"); + return -EINVAL; + } + if (!tb[ETHTOOL_A_BITSET_VALUE]) { + NL_SET_ERR_MSG_ATTR(extack, nest, + "missing value in compact bitset"); + return -EINVAL; + } + if (!no_mask && !tb[ETHTOOL_A_BITSET_MASK]) { + NL_SET_ERR_MSG_ATTR(extack, nest, + "missing mask in compact nonlist bitset"); + return -EINVAL; + } + + attr_nbits = nla_get_u32(tb[ETHTOOL_A_BITSET_SIZE]); + attr_nwords = DIV_ROUND_UP(attr_nbits, 32); + if (nla_len(tb[ETHTOOL_A_BITSET_VALUE]) != attr_nwords * sizeof(u32)) { + NL_SET_ERR_MSG_ATTR(extack, tb[ETHTOOL_A_BITSET_VALUE], + "bitset value length does not match size"); + return -EINVAL; + } + if (tb[ETHTOOL_A_BITSET_MASK] && + nla_len(tb[ETHTOOL_A_BITSET_MASK]) != attr_nwords * sizeof(u32)) { + NL_SET_ERR_MSG_ATTR(extack, tb[ETHTOOL_A_BITSET_MASK], + "bitset mask length does not match size"); + return -EINVAL; + } + if (attr_nbits <= nbits) + return 0; + + test_attr = no_mask ? tb[ETHTOOL_A_BITSET_VALUE] : + tb[ETHTOOL_A_BITSET_MASK]; + if (ethnl_bitmap32_not_zero(nla_data(test_attr), nbits, attr_nbits)) { + NL_SET_ERR_MSG_ATTR(extack, test_attr, + "cannot modify bits past kernel bitset size"); + return -EINVAL; + } + return 0; +} + +/** + * ethnl_update_bitset32() - Apply a bitset nest to a u32 based bitmap + * @bitmap: bitmap to update + * @nbits: size of the updated bitmap in bits + * @attr: nest attribute to parse and apply + * @names: array of bit names; may be null for compact format + * @extack: extack for error reporting + * @mod: set this to true if bitmap is modified, leave as it is if not + * + * Apply bitset netsted attribute to a bitmap. If the attribute represents + * a bit list, @bitmap is set to its contents; otherwise, bits in mask are + * set to values from value. Bitmaps in the attribute may be longer than + * @nbits but the message must not request modifying any bits past @nbits. 
+ * + * Return: negative error code on failure, 0 on success + */ +int ethnl_update_bitset32(u32 *bitmap, unsigned int nbits, + const struct nlattr *attr, ethnl_string_array_t names, + struct netlink_ext_ack *extack, bool *mod) +{ + struct nlattr *tb[ARRAY_SIZE(bitset_policy)]; + unsigned int change_bits; + bool no_mask; + int ret; + + if (!attr) + return 0; + ret = nla_parse_nested(tb, ARRAY_SIZE(bitset_policy) - 1, attr, + bitset_policy, extack); + if (ret < 0) + return ret; + + if (tb[ETHTOOL_A_BITSET_BITS]) + return ethnl_update_bitset32_verbose(bitmap, nbits, attr, tb, + names, extack, mod); + ret = ethnl_compact_sanity_checks(nbits, attr, tb, extack); + if (ret < 0) + return ret; + + no_mask = tb[ETHTOOL_A_BITSET_NOMASK]; + change_bits = min_t(unsigned int, + nla_get_u32(tb[ETHTOOL_A_BITSET_SIZE]), nbits); + ethnl_bitmap32_update(bitmap, change_bits, + nla_data(tb[ETHTOOL_A_BITSET_VALUE]), + no_mask ? NULL : + nla_data(tb[ETHTOOL_A_BITSET_MASK]), + mod); + if (no_mask && change_bits < nbits) + ethnl_bitmap32_clear(bitmap, change_bits, nbits, mod); + + return 0; +} + +/** + * ethnl_parse_bitset() - Compute effective value and mask from bitset nest + * @val: unsigned long based bitmap to put value into + * @mask: unsigned long based bitmap to put mask into + * @nbits: size of @val and @mask bitmaps + * @attr: nest attribute to parse and apply + * @names: array of bit names; may be null for compact format + * @extack: extack for error reporting + * + * Provide @nbits size long bitmaps for value and mask so that + * x = (val & mask) | (x & ~mask) would modify any @nbits sized bitmap x + * the same way ethnl_update_bitset() with the same bitset attribute would. + * + * Return: negative error code on failure, 0 on success + */ +int ethnl_parse_bitset(unsigned long *val, unsigned long *mask, + unsigned int nbits, const struct nlattr *attr, + ethnl_string_array_t names, + struct netlink_ext_ack *extack) +{ + struct nlattr *tb[ARRAY_SIZE(bitset_policy)]; + const struct nlattr *bit_attr; + bool no_mask; + int rem; + int ret; + + if (!attr) + return 0; + ret = nla_parse_nested(tb, ARRAY_SIZE(bitset_policy) - 1, attr, + bitset_policy, extack); + if (ret < 0) + return ret; + no_mask = tb[ETHTOOL_A_BITSET_NOMASK]; + + if (!tb[ETHTOOL_A_BITSET_BITS]) { + unsigned int change_bits; + + ret = ethnl_compact_sanity_checks(nbits, attr, tb, extack); + if (ret < 0) + return ret; + + change_bits = nla_get_u32(tb[ETHTOOL_A_BITSET_SIZE]); + if (change_bits > nbits) + change_bits = nbits; + bitmap_from_arr32(val, nla_data(tb[ETHTOOL_A_BITSET_VALUE]), + change_bits); + if (change_bits < nbits) + bitmap_clear(val, change_bits, nbits - change_bits); + if (no_mask) { + bitmap_fill(mask, nbits); + } else { + bitmap_from_arr32(mask, + nla_data(tb[ETHTOOL_A_BITSET_MASK]), + change_bits); + if (change_bits < nbits) + bitmap_clear(mask, change_bits, + nbits - change_bits); + } + + return 0; + } + + if (tb[ETHTOOL_A_BITSET_VALUE]) { + NL_SET_ERR_MSG_ATTR(extack, tb[ETHTOOL_A_BITSET_VALUE], + "value only allowed in compact bitset"); + return -EINVAL; + } + if (tb[ETHTOOL_A_BITSET_MASK]) { + NL_SET_ERR_MSG_ATTR(extack, tb[ETHTOOL_A_BITSET_MASK], + "mask only allowed in compact bitset"); + return -EINVAL; + } + + bitmap_zero(val, nbits); + if (no_mask) + bitmap_fill(mask, nbits); + else + bitmap_zero(mask, nbits); + + nla_for_each_nested(bit_attr, tb[ETHTOOL_A_BITSET_BITS], rem) { + unsigned int idx; + bool bit_val; + + ret = ethnl_parse_bit(&idx, &bit_val, nbits, bit_attr, no_mask, + names, extack); + if (ret < 0) + 
return ret; + if (bit_val) + __set_bit(idx, val); + if (!no_mask) + __set_bit(idx, mask); + } + + return 0; +} + +#if BITS_PER_LONG == 64 && defined(__BIG_ENDIAN) + +/* 64-bit big endian architectures are the only case when u32 based bitmaps + * and unsigned long based bitmaps have different memory layout so that we + * cannot simply cast the latter to the former and need actual wrappers + * converting the latter to the former. + * + * To reduce the number of slab allocations, the wrappers use fixed size local + * variables for bitmaps up to ETHNL_SMALL_BITMAP_BITS bits which is the + * majority of bitmaps used by ethtool. + */ +#define ETHNL_SMALL_BITMAP_BITS 128 +#define ETHNL_SMALL_BITMAP_WORDS DIV_ROUND_UP(ETHNL_SMALL_BITMAP_BITS, 32) + +int ethnl_bitset_size(const unsigned long *val, const unsigned long *mask, + unsigned int nbits, ethnl_string_array_t names, + bool compact) +{ + u32 small_mask32[ETHNL_SMALL_BITMAP_WORDS]; + u32 small_val32[ETHNL_SMALL_BITMAP_WORDS]; + u32 *mask32; + u32 *val32; + int ret; + + if (nbits > ETHNL_SMALL_BITMAP_BITS) { + unsigned int nwords = DIV_ROUND_UP(nbits, 32); + + val32 = kmalloc_array(2 * nwords, sizeof(u32), GFP_KERNEL); + if (!val32) + return -ENOMEM; + mask32 = val32 + nwords; + } else { + val32 = small_val32; + mask32 = small_mask32; + } + + bitmap_to_arr32(val32, val, nbits); + if (mask) + bitmap_to_arr32(mask32, mask, nbits); + else + mask32 = NULL; + ret = ethnl_bitset32_size(val32, mask32, nbits, names, compact); + + if (nbits > ETHNL_SMALL_BITMAP_BITS) + kfree(val32); + + return ret; +} + +int ethnl_put_bitset(struct sk_buff *skb, int attrtype, + const unsigned long *val, const unsigned long *mask, + unsigned int nbits, ethnl_string_array_t names, + bool compact) +{ + u32 small_mask32[ETHNL_SMALL_BITMAP_WORDS]; + u32 small_val32[ETHNL_SMALL_BITMAP_WORDS]; + u32 *mask32; + u32 *val32; + int ret; + + if (nbits > ETHNL_SMALL_BITMAP_BITS) { + unsigned int nwords = DIV_ROUND_UP(nbits, 32); + + val32 = kmalloc_array(2 * nwords, sizeof(u32), GFP_KERNEL); + if (!val32) + return -ENOMEM; + mask32 = val32 + nwords; + } else { + val32 = small_val32; + mask32 = small_mask32; + } + + bitmap_to_arr32(val32, val, nbits); + if (mask) + bitmap_to_arr32(mask32, mask, nbits); + else + mask32 = NULL; + ret = ethnl_put_bitset32(skb, attrtype, val32, mask32, nbits, names, + compact); + + if (nbits > ETHNL_SMALL_BITMAP_BITS) + kfree(val32); + + return ret; +} + +int ethnl_update_bitset(unsigned long *bitmap, unsigned int nbits, + const struct nlattr *attr, ethnl_string_array_t names, + struct netlink_ext_ack *extack, bool *mod) +{ + u32 small_bitmap32[ETHNL_SMALL_BITMAP_WORDS]; + u32 *bitmap32 = small_bitmap32; + bool u32_mod = false; + int ret; + + if (nbits > ETHNL_SMALL_BITMAP_BITS) { + unsigned int dst_words = DIV_ROUND_UP(nbits, 32); + + bitmap32 = kmalloc_array(dst_words, sizeof(u32), GFP_KERNEL); + if (!bitmap32) + return -ENOMEM; + } + + bitmap_to_arr32(bitmap32, bitmap, nbits); + ret = ethnl_update_bitset32(bitmap32, nbits, attr, names, extack, + &u32_mod); + if (u32_mod) { + bitmap_from_arr32(bitmap, bitmap32, nbits); + *mod = true; + } + + if (nbits > ETHNL_SMALL_BITMAP_BITS) + kfree(bitmap32); + + return ret; +} + +#else + +/* On little endian 64-bit and all 32-bit architectures, an unsigned long + * based bitmap can be interpreted as u32 based one using a simple cast. 
+ */ + +int ethnl_bitset_size(const unsigned long *val, const unsigned long *mask, + unsigned int nbits, ethnl_string_array_t names, + bool compact) +{ + return ethnl_bitset32_size((const u32 *)val, (const u32 *)mask, nbits, + names, compact); +} + +int ethnl_put_bitset(struct sk_buff *skb, int attrtype, + const unsigned long *val, const unsigned long *mask, + unsigned int nbits, ethnl_string_array_t names, + bool compact) +{ + return ethnl_put_bitset32(skb, attrtype, (const u32 *)val, + (const u32 *)mask, nbits, names, compact); +} + +int ethnl_update_bitset(unsigned long *bitmap, unsigned int nbits, + const struct nlattr *attr, ethnl_string_array_t names, + struct netlink_ext_ack *extack, bool *mod) +{ + return ethnl_update_bitset32((u32 *)bitmap, nbits, attr, names, extack, + mod); +} + +#endif /* BITS_PER_LONG == 64 && defined(__BIG_ENDIAN) */ diff --git a/net/ethtool/bitset.h b/net/ethtool/bitset.h new file mode 100644 index 000000000000..c2c2e0051d00 --- /dev/null +++ b/net/ethtool/bitset.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef _NET_ETHTOOL_BITSET_H +#define _NET_ETHTOOL_BITSET_H + +#define ETHNL_MAX_BITSET_SIZE S16_MAX + +typedef const char (*const ethnl_string_array_t)[ETH_GSTRING_LEN]; + +int ethnl_bitset_is_compact(const struct nlattr *bitset, bool *compact); +int ethnl_bitset_size(const unsigned long *val, const unsigned long *mask, + unsigned int nbits, ethnl_string_array_t names, + bool compact); +int ethnl_bitset32_size(const u32 *val, const u32 *mask, unsigned int nbits, + ethnl_string_array_t names, bool compact); +int ethnl_put_bitset(struct sk_buff *skb, int attrtype, + const unsigned long *val, const unsigned long *mask, + unsigned int nbits, ethnl_string_array_t names, + bool compact); +int ethnl_put_bitset32(struct sk_buff *skb, int attrtype, const u32 *val, + const u32 *mask, unsigned int nbits, + ethnl_string_array_t names, bool compact); +int ethnl_update_bitset(unsigned long *bitmap, unsigned int nbits, + const struct nlattr *attr, ethnl_string_array_t names, + struct netlink_ext_ack *extack, bool *mod); +int ethnl_update_bitset32(u32 *bitmap, unsigned int nbits, + const struct nlattr *attr, ethnl_string_array_t names, + struct netlink_ext_ack *extack, bool *mod); +int ethnl_parse_bitset(unsigned long *val, unsigned long *mask, + unsigned int nbits, const struct nlattr *attr, + ethnl_string_array_t names, + struct netlink_ext_ack *extack); + +#endif /* _NET_ETHTOOL_BITSET_H */ diff --git a/net/ethtool/cabletest.c b/net/ethtool/cabletest.c new file mode 100644 index 000000000000..0364b8fb577b --- /dev/null +++ b/net/ethtool/cabletest.c @@ -0,0 +1,453 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include <linux/phy.h> +#include <linux/ethtool_netlink.h> +#include <net/netdev_lock.h> +#include "netlink.h" +#include "common.h" + +/* 802.3 standard allows 100 meters for BaseT cables. However longer + * cables might work, depending on the quality of the cables and the + * PHY. So allow testing for up to 150 meters. 
+ */ +#define MAX_CABLE_LENGTH_CM (150 * 100) + +const struct nla_policy ethnl_cable_test_act_policy[] = { + [ETHTOOL_A_CABLE_TEST_HEADER] = + NLA_POLICY_NESTED(ethnl_header_policy_phy), +}; + +static int ethnl_cable_test_started(struct phy_device *phydev, u8 cmd) +{ + struct sk_buff *skb; + int err = -ENOMEM; + void *ehdr; + + skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); + if (!skb) + goto out; + + ehdr = ethnl_bcastmsg_put(skb, cmd); + if (!ehdr) { + err = -EMSGSIZE; + goto out; + } + + err = ethnl_fill_reply_header(skb, phydev->attached_dev, + ETHTOOL_A_CABLE_TEST_NTF_HEADER); + if (err) + goto out; + + err = nla_put_u8(skb, ETHTOOL_A_CABLE_TEST_NTF_STATUS, + ETHTOOL_A_CABLE_TEST_NTF_STATUS_STARTED); + if (err) + goto out; + + genlmsg_end(skb, ehdr); + + return ethnl_multicast(skb, phydev->attached_dev); + +out: + nlmsg_free(skb); + phydev_err(phydev, "%s: Error %pe\n", __func__, ERR_PTR(err)); + + return err; +} + +int ethnl_act_cable_test(struct sk_buff *skb, struct genl_info *info) +{ + struct ethnl_req_info req_info = {}; + const struct ethtool_phy_ops *ops; + struct nlattr **tb = info->attrs; + struct phy_device *phydev; + struct net_device *dev; + int ret; + + ret = ethnl_parse_header_dev_get(&req_info, + tb[ETHTOOL_A_CABLE_TEST_HEADER], + genl_info_net(info), info->extack, + true); + if (ret < 0) + return ret; + + dev = req_info.dev; + + rtnl_lock(); + netdev_lock_ops(dev); + phydev = ethnl_req_get_phydev(&req_info, tb, + ETHTOOL_A_CABLE_TEST_HEADER, + info->extack); + if (IS_ERR_OR_NULL(phydev)) { + ret = -EOPNOTSUPP; + goto out_unlock; + } + + ops = ethtool_phy_ops; + if (!ops || !ops->start_cable_test) { + ret = -EOPNOTSUPP; + goto out_unlock; + } + + ret = ethnl_ops_begin(dev); + if (ret < 0) + goto out_unlock; + + ret = ops->start_cable_test(phydev, info->extack); + + ethnl_ops_complete(dev); + + if (!ret) + ethnl_cable_test_started(phydev, ETHTOOL_MSG_CABLE_TEST_NTF); + +out_unlock: + netdev_unlock_ops(dev); + rtnl_unlock(); + ethnl_parse_header_dev_put(&req_info); + return ret; +} + +int ethnl_cable_test_alloc(struct phy_device *phydev, u8 cmd) +{ + int err = -ENOMEM; + + /* One TDR sample occupies 20 bytes. For a 150 meter cable, + * with four pairs, around 12K is needed. 
+ */ + phydev->skb = genlmsg_new(SZ_16K, GFP_KERNEL); + if (!phydev->skb) + goto out; + + phydev->ehdr = ethnl_bcastmsg_put(phydev->skb, cmd); + if (!phydev->ehdr) { + err = -EMSGSIZE; + goto out; + } + + err = ethnl_fill_reply_header(phydev->skb, phydev->attached_dev, + ETHTOOL_A_CABLE_TEST_NTF_HEADER); + if (err) + goto out; + + err = nla_put_u8(phydev->skb, ETHTOOL_A_CABLE_TEST_NTF_STATUS, + ETHTOOL_A_CABLE_TEST_NTF_STATUS_COMPLETED); + if (err) + goto out; + + phydev->nest = nla_nest_start(phydev->skb, + ETHTOOL_A_CABLE_TEST_NTF_NEST); + if (!phydev->nest) { + err = -EMSGSIZE; + goto out; + } + + return 0; + +out: + nlmsg_free(phydev->skb); + phydev->skb = NULL; + return err; +} +EXPORT_SYMBOL_GPL(ethnl_cable_test_alloc); + +void ethnl_cable_test_free(struct phy_device *phydev) +{ + nlmsg_free(phydev->skb); + phydev->skb = NULL; +} +EXPORT_SYMBOL_GPL(ethnl_cable_test_free); + +void ethnl_cable_test_finished(struct phy_device *phydev) +{ + nla_nest_end(phydev->skb, phydev->nest); + + genlmsg_end(phydev->skb, phydev->ehdr); + + ethnl_multicast(phydev->skb, phydev->attached_dev); +} +EXPORT_SYMBOL_GPL(ethnl_cable_test_finished); + +int ethnl_cable_test_result_with_src(struct phy_device *phydev, u8 pair, + u8 result, u32 src) +{ + struct nlattr *nest; + int ret = -EMSGSIZE; + + nest = nla_nest_start(phydev->skb, ETHTOOL_A_CABLE_NEST_RESULT); + if (!nest) + return -EMSGSIZE; + + if (nla_put_u8(phydev->skb, ETHTOOL_A_CABLE_RESULT_PAIR, pair)) + goto err; + if (nla_put_u8(phydev->skb, ETHTOOL_A_CABLE_RESULT_CODE, result)) + goto err; + if (src != ETHTOOL_A_CABLE_INF_SRC_UNSPEC) { + if (nla_put_u32(phydev->skb, ETHTOOL_A_CABLE_RESULT_SRC, src)) + goto err; + } + + nla_nest_end(phydev->skb, nest); + return 0; + +err: + nla_nest_cancel(phydev->skb, nest); + return ret; +} +EXPORT_SYMBOL_GPL(ethnl_cable_test_result_with_src); + +int ethnl_cable_test_fault_length_with_src(struct phy_device *phydev, u8 pair, + u32 cm, u32 src) +{ + struct nlattr *nest; + int ret = -EMSGSIZE; + + nest = nla_nest_start(phydev->skb, + ETHTOOL_A_CABLE_NEST_FAULT_LENGTH); + if (!nest) + return -EMSGSIZE; + + if (nla_put_u8(phydev->skb, ETHTOOL_A_CABLE_FAULT_LENGTH_PAIR, pair)) + goto err; + if (nla_put_u32(phydev->skb, ETHTOOL_A_CABLE_FAULT_LENGTH_CM, cm)) + goto err; + if (src != ETHTOOL_A_CABLE_INF_SRC_UNSPEC) { + if (nla_put_u32(phydev->skb, ETHTOOL_A_CABLE_FAULT_LENGTH_SRC, + src)) + goto err; + } + + nla_nest_end(phydev->skb, nest); + return 0; + +err: + nla_nest_cancel(phydev->skb, nest); + return ret; +} +EXPORT_SYMBOL_GPL(ethnl_cable_test_fault_length_with_src); + +static const struct nla_policy cable_test_tdr_act_cfg_policy[] = { + [ETHTOOL_A_CABLE_TEST_TDR_CFG_FIRST] = { .type = NLA_U32 }, + [ETHTOOL_A_CABLE_TEST_TDR_CFG_LAST] = { .type = NLA_U32 }, + [ETHTOOL_A_CABLE_TEST_TDR_CFG_STEP] = { .type = NLA_U32 }, + [ETHTOOL_A_CABLE_TEST_TDR_CFG_PAIR] = { .type = NLA_U8 }, +}; + +const struct nla_policy ethnl_cable_test_tdr_act_policy[] = { + [ETHTOOL_A_CABLE_TEST_TDR_HEADER] = + NLA_POLICY_NESTED(ethnl_header_policy_phy), + [ETHTOOL_A_CABLE_TEST_TDR_CFG] = { .type = NLA_NESTED }, +}; + +/* CABLE_TEST_TDR_ACT */ +static int ethnl_act_cable_test_tdr_cfg(const struct nlattr *nest, + struct genl_info *info, + struct phy_tdr_config *cfg) +{ + struct nlattr *tb[ARRAY_SIZE(cable_test_tdr_act_cfg_policy)]; + int ret; + + cfg->first = 100; + cfg->step = 100; + cfg->last = MAX_CABLE_LENGTH_CM; + cfg->pair = PHY_PAIR_ALL; + + if (!nest) + return 0; + + ret = nla_parse_nested(tb, + ARRAY_SIZE(cable_test_tdr_act_cfg_policy) - 
1, + nest, cable_test_tdr_act_cfg_policy, + info->extack); + if (ret < 0) + return ret; + + if (tb[ETHTOOL_A_CABLE_TEST_TDR_CFG_FIRST]) + cfg->first = nla_get_u32( + tb[ETHTOOL_A_CABLE_TEST_TDR_CFG_FIRST]); + + if (tb[ETHTOOL_A_CABLE_TEST_TDR_CFG_LAST]) + cfg->last = nla_get_u32(tb[ETHTOOL_A_CABLE_TEST_TDR_CFG_LAST]); + + if (tb[ETHTOOL_A_CABLE_TEST_TDR_CFG_STEP]) + cfg->step = nla_get_u32(tb[ETHTOOL_A_CABLE_TEST_TDR_CFG_STEP]); + + if (tb[ETHTOOL_A_CABLE_TEST_TDR_CFG_PAIR]) { + cfg->pair = nla_get_u8(tb[ETHTOOL_A_CABLE_TEST_TDR_CFG_PAIR]); + if (cfg->pair > ETHTOOL_A_CABLE_PAIR_D) { + NL_SET_ERR_MSG_ATTR( + info->extack, + tb[ETHTOOL_A_CABLE_TEST_TDR_CFG_PAIR], + "invalid pair parameter"); + return -EINVAL; + } + } + + if (cfg->first > MAX_CABLE_LENGTH_CM) { + NL_SET_ERR_MSG_ATTR(info->extack, + tb[ETHTOOL_A_CABLE_TEST_TDR_CFG_FIRST], + "invalid first parameter"); + return -EINVAL; + } + + if (cfg->last > MAX_CABLE_LENGTH_CM) { + NL_SET_ERR_MSG_ATTR(info->extack, + tb[ETHTOOL_A_CABLE_TEST_TDR_CFG_LAST], + "invalid last parameter"); + return -EINVAL; + } + + if (cfg->first > cfg->last) { + NL_SET_ERR_MSG(info->extack, "invalid first/last parameter"); + return -EINVAL; + } + + if (!cfg->step) { + NL_SET_ERR_MSG_ATTR(info->extack, + tb[ETHTOOL_A_CABLE_TEST_TDR_CFG_STEP], + "invalid step parameter"); + return -EINVAL; + } + + if (cfg->step > (cfg->last - cfg->first)) { + NL_SET_ERR_MSG_ATTR(info->extack, + tb[ETHTOOL_A_CABLE_TEST_TDR_CFG_STEP], + "step parameter too big"); + return -EINVAL; + } + + return 0; +} + +int ethnl_act_cable_test_tdr(struct sk_buff *skb, struct genl_info *info) +{ + struct ethnl_req_info req_info = {}; + const struct ethtool_phy_ops *ops; + struct nlattr **tb = info->attrs; + struct phy_device *phydev; + struct phy_tdr_config cfg; + struct net_device *dev; + int ret; + + ret = ethnl_parse_header_dev_get(&req_info, + tb[ETHTOOL_A_CABLE_TEST_TDR_HEADER], + genl_info_net(info), info->extack, + true); + if (ret < 0) + return ret; + + dev = req_info.dev; + + ret = ethnl_act_cable_test_tdr_cfg(tb[ETHTOOL_A_CABLE_TEST_TDR_CFG], + info, &cfg); + if (ret) + goto out_dev_put; + + rtnl_lock(); + netdev_lock_ops(dev); + phydev = ethnl_req_get_phydev(&req_info, tb, + ETHTOOL_A_CABLE_TEST_TDR_HEADER, + info->extack); + if (IS_ERR_OR_NULL(phydev)) { + ret = -EOPNOTSUPP; + goto out_unlock; + } + + ops = ethtool_phy_ops; + if (!ops || !ops->start_cable_test_tdr) { + ret = -EOPNOTSUPP; + goto out_unlock; + } + + ret = ethnl_ops_begin(dev); + if (ret < 0) + goto out_unlock; + + ret = ops->start_cable_test_tdr(phydev, info->extack, &cfg); + + ethnl_ops_complete(dev); + + if (!ret) + ethnl_cable_test_started(phydev, + ETHTOOL_MSG_CABLE_TEST_TDR_NTF); + +out_unlock: + netdev_unlock_ops(dev); + rtnl_unlock(); +out_dev_put: + ethnl_parse_header_dev_put(&req_info); + return ret; +} + +int ethnl_cable_test_amplitude(struct phy_device *phydev, + u8 pair, s16 mV) +{ + struct nlattr *nest; + int ret = -EMSGSIZE; + + nest = nla_nest_start(phydev->skb, + ETHTOOL_A_CABLE_TDR_NEST_AMPLITUDE); + if (!nest) + return -EMSGSIZE; + + if (nla_put_u8(phydev->skb, ETHTOOL_A_CABLE_AMPLITUDE_PAIR, pair)) + goto err; + if (nla_put_u16(phydev->skb, ETHTOOL_A_CABLE_AMPLITUDE_mV, mV)) + goto err; + + nla_nest_end(phydev->skb, nest); + return 0; + +err: + nla_nest_cancel(phydev->skb, nest); + return ret; +} +EXPORT_SYMBOL_GPL(ethnl_cable_test_amplitude); + +int ethnl_cable_test_pulse(struct phy_device *phydev, u16 mV) +{ + struct nlattr *nest; + int ret = -EMSGSIZE; + + nest = nla_nest_start(phydev->skb, 
ETHTOOL_A_CABLE_TDR_NEST_PULSE); + if (!nest) + return -EMSGSIZE; + + if (nla_put_u16(phydev->skb, ETHTOOL_A_CABLE_PULSE_mV, mV)) + goto err; + + nla_nest_end(phydev->skb, nest); + return 0; + +err: + nla_nest_cancel(phydev->skb, nest); + return ret; +} +EXPORT_SYMBOL_GPL(ethnl_cable_test_pulse); + +int ethnl_cable_test_step(struct phy_device *phydev, u32 first, u32 last, + u32 step) +{ + struct nlattr *nest; + int ret = -EMSGSIZE; + + nest = nla_nest_start(phydev->skb, ETHTOOL_A_CABLE_TDR_NEST_STEP); + if (!nest) + return -EMSGSIZE; + + if (nla_put_u32(phydev->skb, ETHTOOL_A_CABLE_STEP_FIRST_DISTANCE, + first)) + goto err; + + if (nla_put_u32(phydev->skb, ETHTOOL_A_CABLE_STEP_LAST_DISTANCE, last)) + goto err; + + if (nla_put_u32(phydev->skb, ETHTOOL_A_CABLE_STEP_STEP_DISTANCE, step)) + goto err; + + nla_nest_end(phydev->skb, nest); + return 0; + +err: + nla_nest_cancel(phydev->skb, nest); + return ret; +} +EXPORT_SYMBOL_GPL(ethnl_cable_test_step); diff --git a/net/ethtool/channels.c b/net/ethtool/channels.c new file mode 100644 index 000000000000..ca4f80282448 --- /dev/null +++ b/net/ethtool/channels.c @@ -0,0 +1,199 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include <net/xdp_sock_drv.h> + +#include "netlink.h" +#include "common.h" + +struct channels_req_info { + struct ethnl_req_info base; +}; + +struct channels_reply_data { + struct ethnl_reply_data base; + struct ethtool_channels channels; +}; + +#define CHANNELS_REPDATA(__reply_base) \ + container_of(__reply_base, struct channels_reply_data, base) + +const struct nla_policy ethnl_channels_get_policy[] = { + [ETHTOOL_A_CHANNELS_HEADER] = + NLA_POLICY_NESTED(ethnl_header_policy), +}; + +static int channels_prepare_data(const struct ethnl_req_info *req_base, + struct ethnl_reply_data *reply_base, + const struct genl_info *info) +{ + struct channels_reply_data *data = CHANNELS_REPDATA(reply_base); + struct net_device *dev = reply_base->dev; + int ret; + + if (!dev->ethtool_ops->get_channels) + return -EOPNOTSUPP; + ret = ethnl_ops_begin(dev); + if (ret < 0) + return ret; + dev->ethtool_ops->get_channels(dev, &data->channels); + ethnl_ops_complete(dev); + + return 0; +} + +static int channels_reply_size(const struct ethnl_req_info *req_base, + const struct ethnl_reply_data *reply_base) +{ + return nla_total_size(sizeof(u32)) + /* _CHANNELS_RX_MAX */ + nla_total_size(sizeof(u32)) + /* _CHANNELS_TX_MAX */ + nla_total_size(sizeof(u32)) + /* _CHANNELS_OTHER_MAX */ + nla_total_size(sizeof(u32)) + /* _CHANNELS_COMBINED_MAX */ + nla_total_size(sizeof(u32)) + /* _CHANNELS_RX_COUNT */ + nla_total_size(sizeof(u32)) + /* _CHANNELS_TX_COUNT */ + nla_total_size(sizeof(u32)) + /* _CHANNELS_OTHER_COUNT */ + nla_total_size(sizeof(u32)); /* _CHANNELS_COMBINED_COUNT */ +} + +static int channels_fill_reply(struct sk_buff *skb, + const struct ethnl_req_info *req_base, + const struct ethnl_reply_data *reply_base) +{ + const struct channels_reply_data *data = CHANNELS_REPDATA(reply_base); + const struct ethtool_channels *channels = &data->channels; + + if ((channels->max_rx && + (nla_put_u32(skb, ETHTOOL_A_CHANNELS_RX_MAX, + channels->max_rx) || + nla_put_u32(skb, ETHTOOL_A_CHANNELS_RX_COUNT, + channels->rx_count))) || + (channels->max_tx && + (nla_put_u32(skb, ETHTOOL_A_CHANNELS_TX_MAX, + channels->max_tx) || + nla_put_u32(skb, ETHTOOL_A_CHANNELS_TX_COUNT, + channels->tx_count))) || + (channels->max_other && + (nla_put_u32(skb, ETHTOOL_A_CHANNELS_OTHER_MAX, + channels->max_other) || + nla_put_u32(skb, ETHTOOL_A_CHANNELS_OTHER_COUNT, + 
channels->other_count))) || + (channels->max_combined && + (nla_put_u32(skb, ETHTOOL_A_CHANNELS_COMBINED_MAX, + channels->max_combined) || + nla_put_u32(skb, ETHTOOL_A_CHANNELS_COMBINED_COUNT, + channels->combined_count)))) + return -EMSGSIZE; + + return 0; +} + +/* CHANNELS_SET */ + +const struct nla_policy ethnl_channels_set_policy[] = { + [ETHTOOL_A_CHANNELS_HEADER] = + NLA_POLICY_NESTED(ethnl_header_policy), + [ETHTOOL_A_CHANNELS_RX_COUNT] = { .type = NLA_U32 }, + [ETHTOOL_A_CHANNELS_TX_COUNT] = { .type = NLA_U32 }, + [ETHTOOL_A_CHANNELS_OTHER_COUNT] = { .type = NLA_U32 }, + [ETHTOOL_A_CHANNELS_COMBINED_COUNT] = { .type = NLA_U32 }, +}; + +static int +ethnl_set_channels_validate(struct ethnl_req_info *req_info, + struct genl_info *info) +{ + const struct ethtool_ops *ops = req_info->dev->ethtool_ops; + + return ops->get_channels && ops->set_channels ? 1 : -EOPNOTSUPP; +} + +static int +ethnl_set_channels(struct ethnl_req_info *req_info, struct genl_info *info) +{ + unsigned int from_channel, old_total, i; + bool mod = false, mod_combined = false; + struct net_device *dev = req_info->dev; + struct ethtool_channels channels = {}; + struct nlattr **tb = info->attrs; + u32 err_attr; + int ret; + + dev->ethtool_ops->get_channels(dev, &channels); + old_total = channels.combined_count + + max(channels.rx_count, channels.tx_count); + + ethnl_update_u32(&channels.rx_count, tb[ETHTOOL_A_CHANNELS_RX_COUNT], + &mod); + ethnl_update_u32(&channels.tx_count, tb[ETHTOOL_A_CHANNELS_TX_COUNT], + &mod); + ethnl_update_u32(&channels.other_count, + tb[ETHTOOL_A_CHANNELS_OTHER_COUNT], &mod); + ethnl_update_u32(&channels.combined_count, + tb[ETHTOOL_A_CHANNELS_COMBINED_COUNT], &mod_combined); + mod |= mod_combined; + if (!mod) + return 0; + + /* ensure new channel counts are within limits */ + if (channels.rx_count > channels.max_rx) + err_attr = ETHTOOL_A_CHANNELS_RX_COUNT; + else if (channels.tx_count > channels.max_tx) + err_attr = ETHTOOL_A_CHANNELS_TX_COUNT; + else if (channels.other_count > channels.max_other) + err_attr = ETHTOOL_A_CHANNELS_OTHER_COUNT; + else if (channels.combined_count > channels.max_combined) + err_attr = ETHTOOL_A_CHANNELS_COMBINED_COUNT; + else + err_attr = 0; + if (err_attr) { + NL_SET_ERR_MSG_ATTR(info->extack, tb[err_attr], + "requested channel count exceeds maximum"); + return -EINVAL; + } + + /* ensure there is at least one RX and one TX channel */ + if (!channels.combined_count && !channels.rx_count) + err_attr = ETHTOOL_A_CHANNELS_RX_COUNT; + else if (!channels.combined_count && !channels.tx_count) + err_attr = ETHTOOL_A_CHANNELS_TX_COUNT; + else + err_attr = 0; + if (err_attr) { + if (mod_combined) + err_attr = ETHTOOL_A_CHANNELS_COMBINED_COUNT; + NL_SET_ERR_MSG_ATTR(info->extack, tb[err_attr], + "requested channel counts would result in no RX or TX channel being configured"); + return -EINVAL; + } + + ret = ethtool_check_max_channel(dev, channels, info); + if (ret) + return ret; + + /* Disabling channels, query zero-copy AF_XDP sockets */ + from_channel = channels.combined_count + + min(channels.rx_count, channels.tx_count); + for (i = from_channel; i < old_total; i++) + if (xsk_get_pool_from_qid(dev, i)) { + GENL_SET_ERR_MSG(info, "requested channel counts are too low for existing zerocopy AF_XDP sockets"); + return -EINVAL; + } + + ret = dev->ethtool_ops->set_channels(dev, &channels); + return ret < 0 ? 
ret : 1; +} + +const struct ethnl_request_ops ethnl_channels_request_ops = { + .request_cmd = ETHTOOL_MSG_CHANNELS_GET, + .reply_cmd = ETHTOOL_MSG_CHANNELS_GET_REPLY, + .hdr_attr = ETHTOOL_A_CHANNELS_HEADER, + .req_info_size = sizeof(struct channels_req_info), + .reply_data_size = sizeof(struct channels_reply_data), + + .prepare_data = channels_prepare_data, + .reply_size = channels_reply_size, + .fill_reply = channels_fill_reply, + + .set_validate = ethnl_set_channels_validate, + .set = ethnl_set_channels, + .set_ntf_cmd = ETHTOOL_MSG_CHANNELS_NTF, +}; diff --git a/net/ethtool/cmis.h b/net/ethtool/cmis.h new file mode 100644 index 000000000000..4a9a946cabf0 --- /dev/null +++ b/net/ethtool/cmis.h @@ -0,0 +1,128 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#define ETHTOOL_CMIS_CDB_LPL_MAX_PL_LENGTH 120 +#define ETHTOOL_CMIS_CDB_EPL_MAX_PL_LENGTH 2048 +#define ETHTOOL_CMIS_CDB_CMD_PAGE 0x9F +#define ETHTOOL_CMIS_CDB_PAGE_I2C_ADDR 0x50 + +/** + * struct ethtool_cmis_cdb - CDB commands parameters + * @cmis_rev: CMIS revision major. + * @read_write_len_ext: Allowable additional number of byte octets to the LPL + * in a READ or a WRITE CDB commands. + * @max_completion_time: Maximum CDB command completion time in msec. + */ +struct ethtool_cmis_cdb { + u8 cmis_rev; + u8 read_write_len_ext; + u16 max_completion_time; +}; + +enum ethtool_cmis_cdb_cmd_id { + ETHTOOL_CMIS_CDB_CMD_QUERY_STATUS = 0x0000, + ETHTOOL_CMIS_CDB_CMD_MODULE_FEATURES = 0x0040, + ETHTOOL_CMIS_CDB_CMD_FW_MANAGMENT_FEATURES = 0x0041, + ETHTOOL_CMIS_CDB_CMD_START_FW_DOWNLOAD = 0x0101, + ETHTOOL_CMIS_CDB_CMD_WRITE_FW_BLOCK_LPL = 0x0103, + ETHTOOL_CMIS_CDB_CMD_WRITE_FW_BLOCK_EPL = 0x0104, + ETHTOOL_CMIS_CDB_CMD_COMPLETE_FW_DOWNLOAD = 0x0107, + ETHTOOL_CMIS_CDB_CMD_RUN_FW_IMAGE = 0x0109, + ETHTOOL_CMIS_CDB_CMD_COMMIT_FW_IMAGE = 0x010A, +}; + +/** + * struct ethtool_cmis_cdb_request - CDB commands request fields as decribed in + * the CMIS standard + * @id: Command ID. + * @epl_len: EPL memory length. + * @lpl_len: LPL memory length. + * @chk_code: Check code for the previous field and the payload. + * @resv1: Added to match the CMIS standard request continuity. + * @resv2: Added to match the CMIS standard request continuity. + * @payload: Payload for the CDB commands. + * @epl: Extended payload for the CDB commands. + */ +struct ethtool_cmis_cdb_request { + __be16 id; + struct_group(body, + __be16 epl_len; + u8 lpl_len; + u8 chk_code; + u8 resv1; + u8 resv2; + u8 payload[ETHTOOL_CMIS_CDB_LPL_MAX_PL_LENGTH]; + ); + u8 *epl; /* Everything above this field checksummed. */ +}; + +#define CDB_F_COMPLETION_VALID BIT(0) +#define CDB_F_STATUS_VALID BIT(1) +#define CDB_F_MODULE_STATE_VALID BIT(2) + +/** + * struct ethtool_cmis_cdb_cmd_args - CDB commands execution arguments + * @req: CDB command fields as described in the CMIS standard. + * @max_duration: Maximum duration time for command completion in msec. + * @read_write_len_ext: Allowable additional number of byte octets to the LPL + * in a READ or a WRITE commands. + * @msleep_pre_rpl: Waiting time before checking reply in msec. + * @rpl_exp_len: Expected reply length in bytes. + * @flags: Validation flags for CDB commands. + * @err_msg: Error message to be sent to user space. + */ +struct ethtool_cmis_cdb_cmd_args { + struct ethtool_cmis_cdb_request req; + u16 max_duration; + u8 read_write_len_ext; + u8 msleep_pre_rpl; + u8 rpl_exp_len; + u8 flags; + char *err_msg; +}; + +/** + * struct ethtool_cmis_cdb_rpl_hdr - CDB commands reply header arguments + * @rpl_len: Reply length. 
+ * @rpl_chk_code: Reply check code. + */ +struct ethtool_cmis_cdb_rpl_hdr { + u8 rpl_len; + u8 rpl_chk_code; +}; + +/** + * struct ethtool_cmis_cdb_rpl - CDB commands reply arguments + * @hdr: CDB commands reply header arguments. + * @payload: Payload for the CDB commands reply. + */ +struct ethtool_cmis_cdb_rpl { + struct ethtool_cmis_cdb_rpl_hdr hdr; + u8 payload[ETHTOOL_CMIS_CDB_LPL_MAX_PL_LENGTH]; +}; + +u32 ethtool_cmis_get_max_lpl_size(u8 num_of_byte_octs); + +void ethtool_cmis_cdb_compose_args(struct ethtool_cmis_cdb_cmd_args *args, + enum ethtool_cmis_cdb_cmd_id cmd, u8 *lpl, + u8 lpl_len, u8 *epl, u16 epl_len, + u16 max_duration, u8 read_write_len_ext, + u16 msleep_pre_rpl, u8 rpl_exp_len, + u8 flags); + +void ethtool_cmis_cdb_check_completion_flag(u8 cmis_rev, u8 *flags); + +void ethtool_cmis_page_init(struct ethtool_module_eeprom *page_data, + u8 page, u32 offset, u32 length); + +struct ethtool_cmis_cdb * +ethtool_cmis_cdb_init(struct net_device *dev, + const struct ethtool_module_fw_flash_params *params, + struct ethnl_module_fw_flash_ntf_params *ntf_params); +void ethtool_cmis_cdb_fini(struct ethtool_cmis_cdb *cdb); + +int ethtool_cmis_wait_for_cond(struct net_device *dev, u8 flags, u8 flag, + u16 max_duration, u32 offset, + bool (*cond_success)(u8), bool (*cond_fail)(u8), u8 *state); + +int ethtool_cmis_cdb_execute_cmd(struct net_device *dev, + struct ethtool_cmis_cdb_cmd_args *args); diff --git a/net/ethtool/cmis_cdb.c b/net/ethtool/cmis_cdb.c new file mode 100644 index 000000000000..3057576bc81e --- /dev/null +++ b/net/ethtool/cmis_cdb.c @@ -0,0 +1,666 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include <linux/ethtool.h> +#include <linux/jiffies.h> + +#include "common.h" +#include "module_fw.h" +#include "cmis.h" + +/* For accessing the LPL field on page 9Fh, the allowable length extension is + * min(i, 15) byte octets where i specifies the allowable additional number of + * byte octets in a READ or a WRITE. 
+ */ +u32 ethtool_cmis_get_max_lpl_size(u8 num_of_byte_octs) +{ + return 8 * (1 + min_t(u8, num_of_byte_octs, 15)); +} + +void ethtool_cmis_cdb_compose_args(struct ethtool_cmis_cdb_cmd_args *args, + enum ethtool_cmis_cdb_cmd_id cmd, u8 *lpl, + u8 lpl_len, u8 *epl, u16 epl_len, + u16 max_duration, u8 read_write_len_ext, + u16 msleep_pre_rpl, u8 rpl_exp_len, u8 flags) +{ + args->req.id = cpu_to_be16(cmd); + args->req.lpl_len = lpl_len; + if (lpl) + memcpy(args->req.payload, lpl, args->req.lpl_len); + if (epl) { + args->req.epl_len = cpu_to_be16(epl_len); + args->req.epl = epl; + } + + args->max_duration = max_duration; + args->read_write_len_ext = + ethtool_cmis_get_max_lpl_size(read_write_len_ext); + args->msleep_pre_rpl = msleep_pre_rpl; + args->rpl_exp_len = rpl_exp_len; + args->flags = flags; + args->err_msg = NULL; +} + +void ethtool_cmis_page_init(struct ethtool_module_eeprom *page_data, + u8 page, u32 offset, u32 length) +{ + page_data->page = page; + page_data->offset = offset; + page_data->length = length; + page_data->i2c_address = ETHTOOL_CMIS_CDB_PAGE_I2C_ADDR; +} + +#define CMIS_REVISION_PAGE 0x00 +#define CMIS_REVISION_OFFSET 0x01 + +struct cmis_rev_rpl { + u8 rev; +}; + +static u8 cmis_rev_rpl_major(struct cmis_rev_rpl *rpl) +{ + return rpl->rev >> 4; +} + +static int cmis_rev_major_get(struct net_device *dev, u8 *rev_major) +{ + const struct ethtool_ops *ops = dev->ethtool_ops; + struct ethtool_module_eeprom page_data = {0}; + struct netlink_ext_ack extack = {}; + struct cmis_rev_rpl rpl = {}; + int err; + + ethtool_cmis_page_init(&page_data, CMIS_REVISION_PAGE, + CMIS_REVISION_OFFSET, sizeof(rpl)); + page_data.data = (u8 *)&rpl; + + err = ops->get_module_eeprom_by_page(dev, &page_data, &extack); + if (err < 0) { + if (extack._msg) + netdev_err(dev, "%s\n", extack._msg); + return err; + } + + *rev_major = cmis_rev_rpl_major(&rpl); + + return 0; +} + +#define CMIS_CDB_ADVERTISEMENT_PAGE 0x01 +#define CMIS_CDB_ADVERTISEMENT_OFFSET 0xA3 + +/* Based on section 8.4.11 "CDB Messaging Support Advertisement" in CMIS + * standard revision 5.2. + */ +struct cmis_cdb_advert_rpl { + u8 inst_supported; + u8 read_write_len_ext; + u8 resv1; + u8 resv2; +}; + +static u8 cmis_cdb_advert_rpl_inst_supported(struct cmis_cdb_advert_rpl *rpl) +{ + return rpl->inst_supported >> 6; +} + +static int cmis_cdb_advertisement_get(struct ethtool_cmis_cdb *cdb, + struct net_device *dev, + struct ethnl_module_fw_flash_ntf_params *ntf_params) +{ + const struct ethtool_ops *ops = dev->ethtool_ops; + struct ethtool_module_eeprom page_data = {}; + struct cmis_cdb_advert_rpl rpl = {}; + struct netlink_ext_ack extack = {}; + int err; + + ethtool_cmis_page_init(&page_data, CMIS_CDB_ADVERTISEMENT_PAGE, + CMIS_CDB_ADVERTISEMENT_OFFSET, sizeof(rpl)); + page_data.data = (u8 *)&rpl; + + err = ops->get_module_eeprom_by_page(dev, &page_data, &extack); + if (err < 0) { + if (extack._msg) + netdev_err(dev, "%s\n", extack._msg); + return err; + } + + if (!cmis_cdb_advert_rpl_inst_supported(&rpl)) { + ethnl_module_fw_flash_ntf_err(dev, ntf_params, + "CDB functionality is not supported", + NULL); + return -EOPNOTSUPP; + } + + cdb->read_write_len_ext = rpl.read_write_len_ext; + + return 0; +} + +#define CMIS_PASSWORD_ENTRY_PAGE 0x00 +#define CMIS_PASSWORD_ENTRY_OFFSET 0x7A + +struct cmis_password_entry_pl { + __be32 password; +}; + +/* See section 9.3.1 "CMD 0000h: Query Status" in CMIS standard revision 5.2. 
+ * struct cmis_cdb_query_status_pl and struct cmis_cdb_query_status_rpl are + * structured layouts of the flat arrays, + * struct ethtool_cmis_cdb_request::payload and + * struct ethtool_cmis_cdb_rpl::payload respectively. + */ +struct cmis_cdb_query_status_pl { + u16 response_delay; +}; + +struct cmis_cdb_query_status_rpl { + u8 length; + u8 status; +}; + +static int +cmis_cdb_validate_password(struct ethtool_cmis_cdb *cdb, + struct net_device *dev, + const struct ethtool_module_fw_flash_params *params, + struct ethnl_module_fw_flash_ntf_params *ntf_params) +{ + const struct ethtool_ops *ops = dev->ethtool_ops; + struct cmis_cdb_query_status_pl qs_pl = {0}; + struct ethtool_module_eeprom page_data = {}; + struct ethtool_cmis_cdb_cmd_args args = {}; + struct cmis_password_entry_pl pe_pl = {}; + struct cmis_cdb_query_status_rpl *rpl; + struct netlink_ext_ack extack = {}; + int err; + + ethtool_cmis_page_init(&page_data, CMIS_PASSWORD_ENTRY_PAGE, + CMIS_PASSWORD_ENTRY_OFFSET, sizeof(pe_pl)); + page_data.data = (u8 *)&pe_pl; + + pe_pl = *((struct cmis_password_entry_pl *)page_data.data); + pe_pl.password = params->password; + err = ops->set_module_eeprom_by_page(dev, &page_data, &extack); + if (err < 0) { + if (extack._msg) + netdev_err(dev, "%s\n", extack._msg); + return err; + } + + ethtool_cmis_cdb_compose_args(&args, ETHTOOL_CMIS_CDB_CMD_QUERY_STATUS, + (u8 *)&qs_pl, sizeof(qs_pl), NULL, 0, 0, + cdb->read_write_len_ext, 1000, + sizeof(*rpl), + CDB_F_COMPLETION_VALID | CDB_F_STATUS_VALID); + + err = ethtool_cmis_cdb_execute_cmd(dev, &args); + if (err < 0) { + ethnl_module_fw_flash_ntf_err(dev, ntf_params, + "Query Status command failed", + args.err_msg); + return err; + } + + rpl = (struct cmis_cdb_query_status_rpl *)args.req.payload; + if (!rpl->length || !rpl->status) { + ethnl_module_fw_flash_ntf_err(dev, ntf_params, + "Password was not accepted", + NULL); + return -EINVAL; + } + + return 0; +} + +/* Some CDB commands asserts the CDB completion flag only from CMIS + * revision 5. Therefore, check the relevant validity flag only when + * the revision supports it. + */ +void ethtool_cmis_cdb_check_completion_flag(u8 cmis_rev, u8 *flags) +{ + *flags |= cmis_rev >= 5 ? CDB_F_COMPLETION_VALID : 0; +} + +#define CMIS_CDB_MODULE_FEATURES_RESV_DATA 34 + +/* See section 9.4.1 "CMD 0040h: Module Features" in CMIS standard revision 5.2. + * struct cmis_cdb_module_features_rpl is structured layout of the flat + * array, ethtool_cmis_cdb_rpl::payload. 
+ */ +struct cmis_cdb_module_features_rpl { + u8 resv1[CMIS_CDB_MODULE_FEATURES_RESV_DATA]; + __be16 max_completion_time; +}; + +static u16 +cmis_cdb_module_features_completion_time(struct cmis_cdb_module_features_rpl *rpl) +{ + return be16_to_cpu(rpl->max_completion_time); +} + +static int cmis_cdb_module_features_get(struct ethtool_cmis_cdb *cdb, + struct net_device *dev, + struct ethnl_module_fw_flash_ntf_params *ntf_params) +{ + struct ethtool_cmis_cdb_cmd_args args = {}; + struct cmis_cdb_module_features_rpl *rpl; + u8 flags = CDB_F_STATUS_VALID; + int err; + + ethtool_cmis_cdb_check_completion_flag(cdb->cmis_rev, &flags); + ethtool_cmis_cdb_compose_args(&args, + ETHTOOL_CMIS_CDB_CMD_MODULE_FEATURES, + NULL, 0, NULL, 0, 0, + cdb->read_write_len_ext, 1000, + sizeof(*rpl), flags); + + err = ethtool_cmis_cdb_execute_cmd(dev, &args); + if (err < 0) { + ethnl_module_fw_flash_ntf_err(dev, ntf_params, + "Module Features command failed", + args.err_msg); + return err; + } + + rpl = (struct cmis_cdb_module_features_rpl *)args.req.payload; + cdb->max_completion_time = + cmis_cdb_module_features_completion_time(rpl); + + return 0; +} + +struct ethtool_cmis_cdb * +ethtool_cmis_cdb_init(struct net_device *dev, + const struct ethtool_module_fw_flash_params *params, + struct ethnl_module_fw_flash_ntf_params *ntf_params) +{ + struct ethtool_cmis_cdb *cdb; + int err; + + cdb = kzalloc(sizeof(*cdb), GFP_KERNEL); + if (!cdb) + return ERR_PTR(-ENOMEM); + + err = cmis_rev_major_get(dev, &cdb->cmis_rev); + if (err < 0) + goto err; + + if (cdb->cmis_rev < 4) { + ethnl_module_fw_flash_ntf_err(dev, ntf_params, + "CMIS revision doesn't support module firmware flashing", + NULL); + err = -EOPNOTSUPP; + goto err; + } + + err = cmis_cdb_advertisement_get(cdb, dev, ntf_params); + if (err < 0) + goto err; + + if (params->password_valid) { + err = cmis_cdb_validate_password(cdb, dev, params, ntf_params); + if (err < 0) + goto err; + } + + err = cmis_cdb_module_features_get(cdb, dev, ntf_params); + if (err < 0) + goto err; + + return cdb; + +err: + ethtool_cmis_cdb_fini(cdb); + return ERR_PTR(err); +} + +void ethtool_cmis_cdb_fini(struct ethtool_cmis_cdb *cdb) +{ + kfree(cdb); +} + +static bool is_completed(u8 data) +{ + return !!(data & 0x40); +} + +#define CMIS_CDB_STATUS_SUCCESS 0x01 + +static bool status_success(u8 data) +{ + return data == CMIS_CDB_STATUS_SUCCESS; +} + +#define CMIS_CDB_STATUS_FAIL 0x40 + +static bool status_fail(u8 data) +{ + return data & CMIS_CDB_STATUS_FAIL; +} + +struct cmis_wait_for_cond_rpl { + u8 state; +}; + +static int +ethtool_cmis_module_poll(struct net_device *dev, + struct cmis_wait_for_cond_rpl *rpl, u32 offset, + bool (*cond_success)(u8), bool (*cond_fail)(u8)) +{ + const struct ethtool_ops *ops = dev->ethtool_ops; + struct ethtool_module_eeprom page_data = {0}; + struct netlink_ext_ack extack = {}; + int err; + + ethtool_cmis_page_init(&page_data, 0, offset, sizeof(*rpl)); + page_data.data = (u8 *)rpl; + + err = ops->get_module_eeprom_by_page(dev, &page_data, &extack); + if (err < 0) { + if (extack._msg) + netdev_err_once(dev, "%s\n", extack._msg); + return -EBUSY; + } + + if ((*cond_success)(rpl->state)) + return 0; + + if (*cond_fail && (*cond_fail)(rpl->state)) + return -EIO; + + return -EBUSY; +} + +int ethtool_cmis_wait_for_cond(struct net_device *dev, u8 flags, u8 flag, + u16 max_duration, u32 offset, + bool (*cond_success)(u8), bool (*cond_fail)(u8), + u8 *state) +{ + struct cmis_wait_for_cond_rpl rpl = {}; + unsigned long end; + int err; + + if (!(flags & flag)) + return 
0; + + if (max_duration == 0) + max_duration = U16_MAX; + + end = jiffies + msecs_to_jiffies(max_duration); + do { + err = ethtool_cmis_module_poll(dev, &rpl, offset, cond_success, + cond_fail); + if (err != -EBUSY) + goto out; + + msleep(20); + } while (time_before(jiffies, end)); + + err = ethtool_cmis_module_poll(dev, &rpl, offset, cond_success, + cond_fail); + if (err == -EBUSY) + err = -ETIMEDOUT; + +out: + *state = rpl.state; + return err; +} + +#define CMIS_CDB_COMPLETION_FLAG_OFFSET 0x08 + +static int cmis_cdb_wait_for_completion(struct net_device *dev, + struct ethtool_cmis_cdb_cmd_args *args) +{ + u8 flag; + int err; + + /* Some vendors require a waiting period before the completion flag + * is checked for some CDB commands. + */ + msleep(args->msleep_pre_rpl); + + err = ethtool_cmis_wait_for_cond(dev, args->flags, + CDB_F_COMPLETION_VALID, + args->max_duration, + CMIS_CDB_COMPLETION_FLAG_OFFSET, + is_completed, NULL, &flag); + if (err < 0) + args->err_msg = "Completion Flag was not set in time"; + + return err; +} + +#define CMIS_CDB_STATUS_OFFSET 0x25 + +static void cmis_cdb_status_fail_msg_get(u8 status, char **err_msg) +{ + switch (status) { + case 0b10000001: + *err_msg = "CDB Status is in progress: Busy capturing command"; + break; + case 0b10000010: + *err_msg = + "CDB Status is in progress: Busy checking/validating command"; + break; + case 0b10000011: + *err_msg = "CDB Status is in progress: Busy executing"; + break; + case 0b01000000: + *err_msg = "CDB status failed: no specific failure"; + break; + case 0b01000010: + *err_msg = + "CDB status failed: Parameter range error or parameter not supported"; + break; + case 0b01000101: + *err_msg = "CDB status failed: CdbChkCode error"; + break; + case 0b01000110: + *err_msg = "CDB status failed: Password error"; + break; + default: + *err_msg = "Unknown failure reason"; + } +} + +static int cmis_cdb_wait_for_status(struct net_device *dev, + struct ethtool_cmis_cdb_cmd_args *args) +{ + u8 status; + int err; + + /* Some vendors require a waiting period before the status is checked + * for some CDB commands.
+ */ + msleep(args->msleep_pre_rpl); + + err = ethtool_cmis_wait_for_cond(dev, args->flags, CDB_F_STATUS_VALID, + args->max_duration, + CMIS_CDB_STATUS_OFFSET, + status_success, status_fail, &status); + if (err < 0 && !args->err_msg) + cmis_cdb_status_fail_msg_get(status, &args->err_msg); + + return err; +} + +#define CMIS_CDB_REPLY_OFFSET 0x86 + +static int cmis_cdb_process_reply(struct net_device *dev, + struct ethtool_module_eeprom *page_data, + struct ethtool_cmis_cdb_cmd_args *args) +{ + u8 rpl_hdr_len = sizeof(struct ethtool_cmis_cdb_rpl_hdr); + u8 rpl_exp_len = args->rpl_exp_len + rpl_hdr_len; + const struct ethtool_ops *ops = dev->ethtool_ops; + struct netlink_ext_ack extack = {}; + struct ethtool_cmis_cdb_rpl *rpl; + int err; + + if (!args->rpl_exp_len) + return 0; + + ethtool_cmis_page_init(page_data, ETHTOOL_CMIS_CDB_CMD_PAGE, + CMIS_CDB_REPLY_OFFSET, rpl_exp_len); + page_data->data = kmalloc(page_data->length, GFP_KERNEL); + if (!page_data->data) + return -ENOMEM; + + err = ops->get_module_eeprom_by_page(dev, page_data, &extack); + if (err < 0) { + if (extack._msg) + netdev_err(dev, "%s\n", extack._msg); + goto out; + } + + rpl = (struct ethtool_cmis_cdb_rpl *)page_data->data; + if ((args->rpl_exp_len > rpl->hdr.rpl_len + rpl_hdr_len) || + !rpl->hdr.rpl_chk_code) { + err = -EIO; + goto out; + } + + args->req.lpl_len = rpl->hdr.rpl_len; + memcpy(args->req.payload, rpl->payload, args->req.lpl_len); + +out: + kfree(page_data->data); + return err; +} + +static int +__ethtool_cmis_cdb_execute_cmd(struct net_device *dev, + struct ethtool_module_eeprom *page_data, + u8 page, u32 offset, u32 length, void *data) +{ + const struct ethtool_ops *ops = dev->ethtool_ops; + struct netlink_ext_ack extack = {}; + int err; + + ethtool_cmis_page_init(page_data, page, offset, length); + page_data->data = kmemdup(data, page_data->length, GFP_KERNEL); + if (!page_data->data) + return -ENOMEM; + + err = ops->set_module_eeprom_by_page(dev, page_data, &extack); + if (err < 0) { + if (extack._msg) + netdev_err(dev, "%s\n", extack._msg); + } + + kfree(page_data->data); + return err; +} + +#define CMIS_CDB_EPL_PAGE_START 0xA0 +#define CMIS_CDB_EPL_PAGE_END 0xAF +#define CMIS_CDB_EPL_FW_BLOCK_OFFSET_START 128 +#define CMIS_CDB_EPL_FW_BLOCK_OFFSET_END 255 + +static int +ethtool_cmis_cdb_execute_epl_cmd(struct net_device *dev, + struct ethtool_cmis_cdb_cmd_args *args, + struct ethtool_module_eeprom *page_data) +{ + u16 epl_len = be16_to_cpu(args->req.epl_len); + u32 bytes_written = 0; + u8 page; + int err; + + for (page = CMIS_CDB_EPL_PAGE_START; + page <= CMIS_CDB_EPL_PAGE_END && bytes_written < epl_len; page++) { + u16 offset = CMIS_CDB_EPL_FW_BLOCK_OFFSET_START; + + while (offset <= CMIS_CDB_EPL_FW_BLOCK_OFFSET_END && + bytes_written < epl_len) { + u32 bytes_left = epl_len - bytes_written; + u16 space_left, bytes_to_write; + + space_left = CMIS_CDB_EPL_FW_BLOCK_OFFSET_END - offset + 1; + bytes_to_write = min_t(u16, bytes_left, + min_t(u16, space_left, + args->read_write_len_ext)); + + err = __ethtool_cmis_cdb_execute_cmd(dev, page_data, + page, offset, + bytes_to_write, + args->req.epl + bytes_written); + if (err < 0) + return err; + + offset += bytes_to_write; + bytes_written += bytes_to_write; + } + } + return 0; +} + +static u8 cmis_cdb_calc_checksum(const void *data, size_t size) +{ + const u8 *bytes = (const u8 *)data; + u8 checksum = 0; + + for (size_t i = 0; i < size; i++) + checksum += bytes[i]; + + return ~checksum; +} + +#define CMIS_CDB_CMD_ID_OFFSET 0x80 + +int 
ethtool_cmis_cdb_execute_cmd(struct net_device *dev, + struct ethtool_cmis_cdb_cmd_args *args) +{ + struct ethtool_module_eeprom page_data = {}; + u32 offset; + int err; + + args->req.chk_code = + cmis_cdb_calc_checksum(&args->req, + offsetof(struct ethtool_cmis_cdb_request, + epl)); + + if (args->req.lpl_len > args->read_write_len_ext) { + args->err_msg = "LPL length is longer than CDB read write length extension allows"; + return -EINVAL; + } + + /* According to the CMIS standard, there are two options to trigger a + * CDB command. The default option is to trigger the command by writing + * the CMDID bytes. Therefore, the command is split into two calls: first, + * everything except the CMDID field is written, and then the CMDID field + * itself. + */ + offset = CMIS_CDB_CMD_ID_OFFSET + + offsetof(struct ethtool_cmis_cdb_request, body); + err = __ethtool_cmis_cdb_execute_cmd(dev, &page_data, + ETHTOOL_CMIS_CDB_CMD_PAGE, offset, + sizeof(args->req.body), + &args->req.body); + if (err < 0) + return err; + + if (args->req.epl_len) { + err = ethtool_cmis_cdb_execute_epl_cmd(dev, args, &page_data); + if (err < 0) + return err; + } + + offset = CMIS_CDB_CMD_ID_OFFSET + + offsetof(struct ethtool_cmis_cdb_request, id); + err = __ethtool_cmis_cdb_execute_cmd(dev, &page_data, + ETHTOOL_CMIS_CDB_CMD_PAGE, offset, + sizeof(args->req.id), + &args->req.id); + if (err < 0) + return err; + + err = cmis_cdb_wait_for_completion(dev, args); + if (err < 0) + return err; + + err = cmis_cdb_wait_for_status(dev, args); + if (err < 0) + return err; + + return cmis_cdb_process_reply(dev, &page_data, args); +} diff --git a/net/ethtool/cmis_fw_update.c b/net/ethtool/cmis_fw_update.c new file mode 100644 index 000000000000..df5f344209c4 --- /dev/null +++ b/net/ethtool/cmis_fw_update.c @@ -0,0 +1,485 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include <linux/ethtool.h> +#include <linux/firmware.h> +#include <net/netdev_lock.h> + +#include "common.h" +#include "module_fw.h" +#include "cmis.h" + +struct cmis_fw_update_fw_mng_features { + u8 start_cmd_payload_size; + u8 write_mechanism; + u16 max_duration_start; + u16 max_duration_write; + u16 max_duration_complete; +}; + +/* See section 9.4.2 "CMD 0041h: Firmware Management Features" in CMIS standard + * revision 5.2. + * struct cmis_cdb_fw_mng_features_rpl is a structured layout of the flat + * array, ethtool_cmis_cdb_rpl::payload.
+ */ +struct cmis_cdb_fw_mng_features_rpl { + u8 resv1; + u8 resv2; + u8 start_cmd_payload_size; + u8 resv3; + u8 read_write_len_ext; + u8 write_mechanism; + u8 resv4; + u8 resv5; + __be16 max_duration_start; + __be16 resv6; + __be16 max_duration_write; + __be16 max_duration_complete; + __be16 resv7; +}; + +enum cmis_cdb_fw_write_mechanism { + CMIS_CDB_FW_WRITE_MECHANISM_NONE = 0x00, + CMIS_CDB_FW_WRITE_MECHANISM_LPL = 0x01, + CMIS_CDB_FW_WRITE_MECHANISM_EPL = 0x10, + CMIS_CDB_FW_WRITE_MECHANISM_BOTH = 0x11, +}; + +static int +cmis_fw_update_fw_mng_features_get(struct ethtool_cmis_cdb *cdb, + struct net_device *dev, + struct cmis_fw_update_fw_mng_features *fw_mng, + struct ethnl_module_fw_flash_ntf_params *ntf_params) +{ + struct ethtool_cmis_cdb_cmd_args args = {}; + struct cmis_cdb_fw_mng_features_rpl *rpl; + u8 flags = CDB_F_STATUS_VALID; + int err; + + ethtool_cmis_cdb_check_completion_flag(cdb->cmis_rev, &flags); + ethtool_cmis_cdb_compose_args(&args, + ETHTOOL_CMIS_CDB_CMD_FW_MANAGMENT_FEATURES, + NULL, 0, NULL, 0, + cdb->max_completion_time, + cdb->read_write_len_ext, 1000, + sizeof(*rpl), flags); + + err = ethtool_cmis_cdb_execute_cmd(dev, &args); + if (err < 0) { + ethnl_module_fw_flash_ntf_err(dev, ntf_params, + "FW Management Features command failed", + args.err_msg); + return err; + } + + rpl = (struct cmis_cdb_fw_mng_features_rpl *)args.req.payload; + if (rpl->write_mechanism == CMIS_CDB_FW_WRITE_MECHANISM_NONE) { + ethnl_module_fw_flash_ntf_err(dev, ntf_params, + "CDB write mechanism is not supported", + NULL); + return -EOPNOTSUPP; + } + + /* Above, we used read_write_len_ext that we got from CDB + * advertisement. Update it with the value that we got from module + * features query, which is specific for Firmware Management Commands + * (IDs 0100h-01FFh). + */ + cdb->read_write_len_ext = rpl->read_write_len_ext; + fw_mng->start_cmd_payload_size = rpl->start_cmd_payload_size; + fw_mng->write_mechanism = + rpl->write_mechanism == CMIS_CDB_FW_WRITE_MECHANISM_LPL ? + CMIS_CDB_FW_WRITE_MECHANISM_LPL : + CMIS_CDB_FW_WRITE_MECHANISM_EPL; + fw_mng->max_duration_start = be16_to_cpu(rpl->max_duration_start); + fw_mng->max_duration_write = be16_to_cpu(rpl->max_duration_write); + fw_mng->max_duration_complete = be16_to_cpu(rpl->max_duration_complete); + + return 0; +} + +/* See section 9.7.2 "CMD 0101h: Start Firmware Download" in CMIS standard + * revision 5.2. + * struct cmis_cdb_start_fw_download_pl is a structured layout of the + * flat array, ethtool_cmis_cdb_request::payload. 
+ */ +struct cmis_cdb_start_fw_download_pl { + __struct_group(cmis_cdb_start_fw_download_pl_h, head, /* no attrs */, + __be32 image_size; + __be32 resv1; + ); + u8 vendor_data[ETHTOOL_CMIS_CDB_LPL_MAX_PL_LENGTH - + sizeof(struct cmis_cdb_start_fw_download_pl_h)]; +}; + +static int +cmis_fw_update_start_download(struct ethtool_cmis_cdb *cdb, + struct ethtool_cmis_fw_update_params *fw_update, + struct cmis_fw_update_fw_mng_features *fw_mng) +{ + u8 vendor_data_size = fw_mng->start_cmd_payload_size; + struct cmis_cdb_start_fw_download_pl pl = {}; + struct ethtool_cmis_cdb_cmd_args args = {}; + u8 lpl_len; + int err; + + pl.image_size = cpu_to_be32(fw_update->fw->size); + memcpy(pl.vendor_data, fw_update->fw->data, vendor_data_size); + + lpl_len = offsetof(struct cmis_cdb_start_fw_download_pl, + vendor_data[vendor_data_size]); + + ethtool_cmis_cdb_compose_args(&args, + ETHTOOL_CMIS_CDB_CMD_START_FW_DOWNLOAD, + (u8 *)&pl, lpl_len, NULL, 0, + fw_mng->max_duration_start, + cdb->read_write_len_ext, 1000, 0, + CDB_F_COMPLETION_VALID | CDB_F_STATUS_VALID); + + err = ethtool_cmis_cdb_execute_cmd(fw_update->dev, &args); + if (err < 0) + ethnl_module_fw_flash_ntf_err(fw_update->dev, + &fw_update->ntf_params, + "Start FW download command failed", + args.err_msg); + + return err; +} + +/* See section 9.7.4 "CMD 0103h: Write Firmware Block LPL" in CMIS standard + * revision 5.2. + * struct cmis_cdb_write_fw_block_lpl_pl is a structured layout of the + * flat array, ethtool_cmis_cdb_request::payload. + */ +struct cmis_cdb_write_fw_block_lpl_pl { + __be32 block_address; + u8 fw_block[ETHTOOL_CMIS_CDB_LPL_MAX_PL_LENGTH - sizeof(__be32)]; +}; + +static int +cmis_fw_update_write_image_lpl(struct ethtool_cmis_cdb *cdb, + struct ethtool_cmis_fw_update_params *fw_update, + struct cmis_fw_update_fw_mng_features *fw_mng) +{ + u8 start = fw_mng->start_cmd_payload_size; + u32 offset, max_block_size, max_lpl_len; + u32 image_size = fw_update->fw->size; + int err; + + max_lpl_len = min_t(u32, + ethtool_cmis_get_max_lpl_size(cdb->read_write_len_ext), + ETHTOOL_CMIS_CDB_LPL_MAX_PL_LENGTH); + max_block_size = + max_lpl_len - sizeof_field(struct cmis_cdb_write_fw_block_lpl_pl, + block_address); + + for (offset = start; offset < image_size; offset += max_block_size) { + struct cmis_cdb_write_fw_block_lpl_pl pl = { + .block_address = cpu_to_be32(offset - start), + }; + struct ethtool_cmis_cdb_cmd_args args = {}; + u32 block_size, lpl_len; + + ethnl_module_fw_flash_ntf_in_progress(fw_update->dev, + &fw_update->ntf_params, + offset - start, + image_size); + block_size = min_t(u32, max_block_size, image_size - offset); + memcpy(pl.fw_block, &fw_update->fw->data[offset], block_size); + lpl_len = block_size + + sizeof_field(struct cmis_cdb_write_fw_block_lpl_pl, + block_address); + + ethtool_cmis_cdb_compose_args(&args, + ETHTOOL_CMIS_CDB_CMD_WRITE_FW_BLOCK_LPL, + (u8 *)&pl, lpl_len, NULL, 0, + fw_mng->max_duration_write, + cdb->read_write_len_ext, 1, 0, + CDB_F_COMPLETION_VALID | CDB_F_STATUS_VALID); + + err = ethtool_cmis_cdb_execute_cmd(fw_update->dev, &args); + if (err < 0) { + ethnl_module_fw_flash_ntf_err(fw_update->dev, + &fw_update->ntf_params, + "Write FW block LPL command failed", + args.err_msg); + return err; + } + } + + return 0; +} + +struct cmis_cdb_write_fw_block_epl_pl { + u8 fw_block[ETHTOOL_CMIS_CDB_EPL_MAX_PL_LENGTH]; +}; + +static int +cmis_fw_update_write_image_epl(struct ethtool_cmis_cdb *cdb, + struct ethtool_cmis_fw_update_params *fw_update, + struct cmis_fw_update_fw_mng_features *fw_mng) +{ + u8 start = 
fw_mng->start_cmd_payload_size; + u32 image_size = fw_update->fw->size; + u32 offset, lpl_len; + int err; + + lpl_len = sizeof_field(struct cmis_cdb_write_fw_block_lpl_pl, + block_address); + + for (offset = start; offset < image_size; + offset += ETHTOOL_CMIS_CDB_EPL_MAX_PL_LENGTH) { + struct cmis_cdb_write_fw_block_lpl_pl lpl = { + .block_address = cpu_to_be32(offset - start), + }; + struct cmis_cdb_write_fw_block_epl_pl *epl; + struct ethtool_cmis_cdb_cmd_args args = {}; + u32 epl_len; + + ethnl_module_fw_flash_ntf_in_progress(fw_update->dev, + &fw_update->ntf_params, + offset - start, + image_size); + + epl_len = min_t(u32, ETHTOOL_CMIS_CDB_EPL_MAX_PL_LENGTH, + image_size - offset); + epl = kmalloc_array(epl_len, sizeof(u8), GFP_KERNEL); + if (!epl) + return -ENOMEM; + + memcpy(epl->fw_block, &fw_update->fw->data[offset], epl_len); + + ethtool_cmis_cdb_compose_args(&args, + ETHTOOL_CMIS_CDB_CMD_WRITE_FW_BLOCK_EPL, + (u8 *)&lpl, lpl_len, (u8 *)epl, + epl_len, + fw_mng->max_duration_write, + cdb->read_write_len_ext, 1, 0, + CDB_F_COMPLETION_VALID | CDB_F_STATUS_VALID); + + err = ethtool_cmis_cdb_execute_cmd(fw_update->dev, &args); + kfree(epl); + if (err < 0) { + ethnl_module_fw_flash_ntf_err(fw_update->dev, + &fw_update->ntf_params, + "Write FW block EPL command failed", + args.err_msg); + return err; + } + } + + return 0; +} + +static int +cmis_fw_update_complete_download(struct ethtool_cmis_cdb *cdb, + struct net_device *dev, + struct cmis_fw_update_fw_mng_features *fw_mng, + struct ethnl_module_fw_flash_ntf_params *ntf_params) +{ + struct ethtool_cmis_cdb_cmd_args args = {}; + int err; + + ethtool_cmis_cdb_compose_args(&args, + ETHTOOL_CMIS_CDB_CMD_COMPLETE_FW_DOWNLOAD, + NULL, 0, NULL, 0, + fw_mng->max_duration_complete, + cdb->read_write_len_ext, 1000, 0, + CDB_F_COMPLETION_VALID | CDB_F_STATUS_VALID); + + err = ethtool_cmis_cdb_execute_cmd(dev, &args); + if (err < 0) + ethnl_module_fw_flash_ntf_err(dev, ntf_params, + "Complete FW download command failed", + args.err_msg); + + return err; +} + +static int +cmis_fw_update_download_image(struct ethtool_cmis_cdb *cdb, + struct ethtool_cmis_fw_update_params *fw_update, + struct cmis_fw_update_fw_mng_features *fw_mng) +{ + int err; + + err = cmis_fw_update_start_download(cdb, fw_update, fw_mng); + if (err < 0) + return err; + + if (fw_mng->write_mechanism == CMIS_CDB_FW_WRITE_MECHANISM_LPL) { + err = cmis_fw_update_write_image_lpl(cdb, fw_update, fw_mng); + if (err < 0) + return err; + } else { + err = cmis_fw_update_write_image_epl(cdb, fw_update, fw_mng); + if (err < 0) + return err; + } + + err = cmis_fw_update_complete_download(cdb, fw_update->dev, fw_mng, + &fw_update->ntf_params); + if (err < 0) + return err; + + return 0; +} + +enum { + CMIS_MODULE_LOW_PWR = 1, + CMIS_MODULE_READY = 3, +}; + +static bool module_is_ready(u8 data) +{ + u8 state = (data >> 1) & 7; + + return state == CMIS_MODULE_READY || state == CMIS_MODULE_LOW_PWR; +} + +#define CMIS_MODULE_READY_MAX_DURATION_MSEC 1000 +#define CMIS_MODULE_STATE_OFFSET 3 + +static int +cmis_fw_update_wait_for_module_state(struct net_device *dev, u8 flags) +{ + u8 state; + + return ethtool_cmis_wait_for_cond(dev, flags, CDB_F_MODULE_STATE_VALID, + CMIS_MODULE_READY_MAX_DURATION_MSEC, + CMIS_MODULE_STATE_OFFSET, + module_is_ready, NULL, &state); +} + +/* See section 9.7.10 "CMD 0109h: Run Firmware Image" in CMIS standard + * revision 5.2. + * struct cmis_cdb_run_fw_image_pl is a structured layout of the flat + * array, ethtool_cmis_cdb_request::payload. 
+ */ +struct cmis_cdb_run_fw_image_pl { + u8 resv1; + u8 image_to_run; + u16 delay_to_reset; +}; + +static int +cmis_fw_update_run_image(struct ethtool_cmis_cdb *cdb, struct net_device *dev, + struct ethnl_module_fw_flash_ntf_params *ntf_params) +{ + struct ethtool_cmis_cdb_cmd_args args = {}; + struct cmis_cdb_run_fw_image_pl pl = {0}; + int err; + + ethtool_cmis_cdb_compose_args(&args, ETHTOOL_CMIS_CDB_CMD_RUN_FW_IMAGE, + (u8 *)&pl, sizeof(pl), NULL, 0, + cdb->max_completion_time, + cdb->read_write_len_ext, 1000, 0, + CDB_F_MODULE_STATE_VALID); + + err = ethtool_cmis_cdb_execute_cmd(dev, &args); + if (err < 0) { + ethnl_module_fw_flash_ntf_err(dev, ntf_params, + "Run image command failed", + args.err_msg); + return err; + } + + err = cmis_fw_update_wait_for_module_state(dev, args.flags); + if (err < 0) + ethnl_module_fw_flash_ntf_err(dev, ntf_params, + "Module is not ready on time after reset", + NULL); + + return err; +} + +static int +cmis_fw_update_commit_image(struct ethtool_cmis_cdb *cdb, + struct net_device *dev, + struct ethnl_module_fw_flash_ntf_params *ntf_params) +{ + struct ethtool_cmis_cdb_cmd_args args = {}; + int err; + + ethtool_cmis_cdb_compose_args(&args, + ETHTOOL_CMIS_CDB_CMD_COMMIT_FW_IMAGE, + NULL, 0, NULL, 0, + cdb->max_completion_time, + cdb->read_write_len_ext, 1000, 0, + CDB_F_COMPLETION_VALID | CDB_F_STATUS_VALID); + + err = ethtool_cmis_cdb_execute_cmd(dev, &args); + if (err < 0) + ethnl_module_fw_flash_ntf_err(dev, ntf_params, + "Commit image command failed", + args.err_msg); + + return err; +} + +static int cmis_fw_update_reset(struct net_device *dev) +{ + __u32 reset_data = ETH_RESET_PHY; + int ret; + + netdev_lock_ops(dev); + ret = dev->ethtool_ops->reset(dev, &reset_data); + netdev_unlock_ops(dev); + + return ret; +} + +void +ethtool_cmis_fw_update(struct ethtool_cmis_fw_update_params *fw_update) +{ + struct ethnl_module_fw_flash_ntf_params *ntf_params = + &fw_update->ntf_params; + struct cmis_fw_update_fw_mng_features fw_mng = {0}; + struct net_device *dev = fw_update->dev; + struct ethtool_cmis_cdb *cdb; + int err; + + cdb = ethtool_cmis_cdb_init(dev, &fw_update->params, ntf_params); + if (IS_ERR(cdb)) + goto err_send_ntf; + + ethnl_module_fw_flash_ntf_start(dev, ntf_params); + + err = cmis_fw_update_fw_mng_features_get(cdb, dev, &fw_mng, ntf_params); + if (err < 0) + goto err_cdb_fini; + + err = cmis_fw_update_download_image(cdb, fw_update, &fw_mng); + if (err < 0) + goto err_cdb_fini; + + err = cmis_fw_update_run_image(cdb, dev, ntf_params); + if (err < 0) + goto err_cdb_fini; + + /* The CDB command "Run Firmware Image" resets the firmware, so the new + * one might have different settings. + * Free the old CDB instance, and init a new one. 
+ */ + ethtool_cmis_cdb_fini(cdb); + + cdb = ethtool_cmis_cdb_init(dev, &fw_update->params, ntf_params); + if (IS_ERR(cdb)) + goto err_send_ntf; + + err = cmis_fw_update_commit_image(cdb, dev, ntf_params); + if (err < 0) + goto err_cdb_fini; + + err = cmis_fw_update_reset(dev); + if (err < 0) + goto err_cdb_fini; + + ethnl_module_fw_flash_ntf_complete(dev, ntf_params); + ethtool_cmis_cdb_fini(cdb); + return; + +err_cdb_fini: + ethtool_cmis_cdb_fini(cdb); +err_send_ntf: + ethnl_module_fw_flash_ntf_err(dev, ntf_params, NULL, NULL); +} diff --git a/net/ethtool/coalesce.c b/net/ethtool/coalesce.c new file mode 100644 index 000000000000..3e18ca1ccc5e --- /dev/null +++ b/net/ethtool/coalesce.c @@ -0,0 +1,649 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include <linux/dim.h> +#include "netlink.h" +#include "common.h" + +struct coalesce_req_info { + struct ethnl_req_info base; +}; + +struct coalesce_reply_data { + struct ethnl_reply_data base; + struct ethtool_coalesce coalesce; + struct kernel_ethtool_coalesce kernel_coalesce; + u32 supported_params; +}; + +#define COALESCE_REPDATA(__reply_base) \ + container_of(__reply_base, struct coalesce_reply_data, base) + +#define __SUPPORTED_OFFSET ETHTOOL_A_COALESCE_RX_USECS +static u32 attr_to_mask(unsigned int attr_type) +{ + return BIT(attr_type - __SUPPORTED_OFFSET); +} + +/* build time check that indices in ethtool_ops::supported_coalesce_params + * match corresponding attribute types with an offset + */ +#define __CHECK_SUPPORTED_OFFSET(x) \ + static_assert((ETHTOOL_ ## x) == \ + BIT((ETHTOOL_A_ ## x) - __SUPPORTED_OFFSET)) +__CHECK_SUPPORTED_OFFSET(COALESCE_RX_USECS); +__CHECK_SUPPORTED_OFFSET(COALESCE_RX_MAX_FRAMES); +__CHECK_SUPPORTED_OFFSET(COALESCE_RX_USECS_IRQ); +__CHECK_SUPPORTED_OFFSET(COALESCE_RX_MAX_FRAMES_IRQ); +__CHECK_SUPPORTED_OFFSET(COALESCE_TX_USECS); +__CHECK_SUPPORTED_OFFSET(COALESCE_TX_MAX_FRAMES); +__CHECK_SUPPORTED_OFFSET(COALESCE_TX_USECS_IRQ); +__CHECK_SUPPORTED_OFFSET(COALESCE_TX_MAX_FRAMES_IRQ); +__CHECK_SUPPORTED_OFFSET(COALESCE_STATS_BLOCK_USECS); +__CHECK_SUPPORTED_OFFSET(COALESCE_USE_ADAPTIVE_RX); +__CHECK_SUPPORTED_OFFSET(COALESCE_USE_ADAPTIVE_TX); +__CHECK_SUPPORTED_OFFSET(COALESCE_PKT_RATE_LOW); +__CHECK_SUPPORTED_OFFSET(COALESCE_RX_USECS_LOW); +__CHECK_SUPPORTED_OFFSET(COALESCE_RX_MAX_FRAMES_LOW); +__CHECK_SUPPORTED_OFFSET(COALESCE_TX_USECS_LOW); +__CHECK_SUPPORTED_OFFSET(COALESCE_TX_MAX_FRAMES_LOW); +__CHECK_SUPPORTED_OFFSET(COALESCE_PKT_RATE_HIGH); +__CHECK_SUPPORTED_OFFSET(COALESCE_RX_USECS_HIGH); +__CHECK_SUPPORTED_OFFSET(COALESCE_RX_MAX_FRAMES_HIGH); +__CHECK_SUPPORTED_OFFSET(COALESCE_TX_USECS_HIGH); +__CHECK_SUPPORTED_OFFSET(COALESCE_TX_MAX_FRAMES_HIGH); +__CHECK_SUPPORTED_OFFSET(COALESCE_RATE_SAMPLE_INTERVAL); + +const struct nla_policy ethnl_coalesce_get_policy[] = { + [ETHTOOL_A_COALESCE_HEADER] = + NLA_POLICY_NESTED(ethnl_header_policy), +}; + +static int coalesce_prepare_data(const struct ethnl_req_info *req_base, + struct ethnl_reply_data *reply_base, + const struct genl_info *info) +{ + struct coalesce_reply_data *data = COALESCE_REPDATA(reply_base); + struct net_device *dev = reply_base->dev; + int ret; + + if (!dev->ethtool_ops->get_coalesce) + return -EOPNOTSUPP; + data->supported_params = dev->ethtool_ops->supported_coalesce_params; + ret = ethnl_ops_begin(dev); + if (ret < 0) + return ret; + ret = dev->ethtool_ops->get_coalesce(dev, &data->coalesce, + &data->kernel_coalesce, + info->extack); + ethnl_ops_complete(dev); + + return ret; +} + +static int coalesce_reply_size(const struct 
ethnl_req_info *req_base, + const struct ethnl_reply_data *reply_base) +{ + int modersz = nla_total_size(0) + /* _PROFILE_IRQ_MODERATION, nest */ + nla_total_size(sizeof(u32)) + /* _IRQ_MODERATION_USEC */ + nla_total_size(sizeof(u32)) + /* _IRQ_MODERATION_PKTS */ + nla_total_size(sizeof(u32)); /* _IRQ_MODERATION_COMPS */ + + int total_modersz = nla_total_size(0) + /* _{R,T}X_PROFILE, nest */ + modersz * NET_DIM_PARAMS_NUM_PROFILES; + + return nla_total_size(sizeof(u32)) + /* _RX_USECS */ + nla_total_size(sizeof(u32)) + /* _RX_MAX_FRAMES */ + nla_total_size(sizeof(u32)) + /* _RX_USECS_IRQ */ + nla_total_size(sizeof(u32)) + /* _RX_MAX_FRAMES_IRQ */ + nla_total_size(sizeof(u32)) + /* _TX_USECS */ + nla_total_size(sizeof(u32)) + /* _TX_MAX_FRAMES */ + nla_total_size(sizeof(u32)) + /* _TX_USECS_IRQ */ + nla_total_size(sizeof(u32)) + /* _TX_MAX_FRAMES_IRQ */ + nla_total_size(sizeof(u32)) + /* _STATS_BLOCK_USECS */ + nla_total_size(sizeof(u8)) + /* _USE_ADAPTIVE_RX */ + nla_total_size(sizeof(u8)) + /* _USE_ADAPTIVE_TX */ + nla_total_size(sizeof(u32)) + /* _PKT_RATE_LOW */ + nla_total_size(sizeof(u32)) + /* _RX_USECS_LOW */ + nla_total_size(sizeof(u32)) + /* _RX_MAX_FRAMES_LOW */ + nla_total_size(sizeof(u32)) + /* _TX_USECS_LOW */ + nla_total_size(sizeof(u32)) + /* _TX_MAX_FRAMES_LOW */ + nla_total_size(sizeof(u32)) + /* _PKT_RATE_HIGH */ + nla_total_size(sizeof(u32)) + /* _RX_USECS_HIGH */ + nla_total_size(sizeof(u32)) + /* _RX_MAX_FRAMES_HIGH */ + nla_total_size(sizeof(u32)) + /* _TX_USECS_HIGH */ + nla_total_size(sizeof(u32)) + /* _TX_MAX_FRAMES_HIGH */ + nla_total_size(sizeof(u32)) + /* _RATE_SAMPLE_INTERVAL */ + nla_total_size(sizeof(u8)) + /* _USE_CQE_MODE_TX */ + nla_total_size(sizeof(u8)) + /* _USE_CQE_MODE_RX */ + nla_total_size(sizeof(u32)) + /* _TX_AGGR_MAX_BYTES */ + nla_total_size(sizeof(u32)) + /* _TX_AGGR_MAX_FRAMES */ + nla_total_size(sizeof(u32)) + /* _TX_AGGR_TIME_USECS */ + total_modersz * 2; /* _{R,T}X_PROFILE */ +} + +static bool coalesce_put_u32(struct sk_buff *skb, u16 attr_type, u32 val, + u32 supported_params) +{ + if (!val && !(supported_params & attr_to_mask(attr_type))) + return false; + return nla_put_u32(skb, attr_type, val); +} + +static bool coalesce_put_bool(struct sk_buff *skb, u16 attr_type, u32 val, + u32 supported_params) +{ + if (!val && !(supported_params & attr_to_mask(attr_type))) + return false; + return nla_put_u8(skb, attr_type, !!val); +} + +/** + * coalesce_put_profile - fill reply with a nla nest of NET_DIM_PARAMS_NUM_PROFILES child nla nests. + * @skb: socket buffer the message is stored in + * @attr_type: nest attr type ETHTOOL_A_COALESCE_*X_PROFILE + * @profile: data passed to userspace + * @coal_flags: modifiable parameters supported by the driver + * + * Put a dim profile nest attribute. Refer to ETHTOOL_A_PROFILE_IRQ_MODERATION. + * + * Return: 0 on success or a negative error code.
+ */ +static int coalesce_put_profile(struct sk_buff *skb, u16 attr_type, + const struct dim_cq_moder *profile, + u8 coal_flags) +{ + struct nlattr *profile_attr, *moder_attr; + int i, ret; + + if (!profile || !coal_flags) + return 0; + + profile_attr = nla_nest_start(skb, attr_type); + if (!profile_attr) + return -EMSGSIZE; + + for (i = 0; i < NET_DIM_PARAMS_NUM_PROFILES; i++) { + moder_attr = nla_nest_start(skb, + ETHTOOL_A_PROFILE_IRQ_MODERATION); + if (!moder_attr) { + ret = -EMSGSIZE; + goto cancel_profile; + } + + if (coal_flags & DIM_COALESCE_USEC) { + ret = nla_put_u32(skb, ETHTOOL_A_IRQ_MODERATION_USEC, + profile[i].usec); + if (ret) + goto cancel_moder; + } + + if (coal_flags & DIM_COALESCE_PKTS) { + ret = nla_put_u32(skb, ETHTOOL_A_IRQ_MODERATION_PKTS, + profile[i].pkts); + if (ret) + goto cancel_moder; + } + + if (coal_flags & DIM_COALESCE_COMPS) { + ret = nla_put_u32(skb, ETHTOOL_A_IRQ_MODERATION_COMPS, + profile[i].comps); + if (ret) + goto cancel_moder; + } + + nla_nest_end(skb, moder_attr); + } + + nla_nest_end(skb, profile_attr); + + return 0; + +cancel_moder: + nla_nest_cancel(skb, moder_attr); +cancel_profile: + nla_nest_cancel(skb, profile_attr); + return ret; +} + +static int coalesce_fill_reply(struct sk_buff *skb, + const struct ethnl_req_info *req_base, + const struct ethnl_reply_data *reply_base) +{ + const struct coalesce_reply_data *data = COALESCE_REPDATA(reply_base); + const struct kernel_ethtool_coalesce *kcoal = &data->kernel_coalesce; + const struct ethtool_coalesce *coal = &data->coalesce; + u32 supported = data->supported_params; + struct dim_irq_moder *moder; + int ret = 0; + + if (coalesce_put_u32(skb, ETHTOOL_A_COALESCE_RX_USECS, + coal->rx_coalesce_usecs, supported) || + coalesce_put_u32(skb, ETHTOOL_A_COALESCE_RX_MAX_FRAMES, + coal->rx_max_coalesced_frames, supported) || + coalesce_put_u32(skb, ETHTOOL_A_COALESCE_RX_USECS_IRQ, + coal->rx_coalesce_usecs_irq, supported) || + coalesce_put_u32(skb, ETHTOOL_A_COALESCE_RX_MAX_FRAMES_IRQ, + coal->rx_max_coalesced_frames_irq, supported) || + coalesce_put_u32(skb, ETHTOOL_A_COALESCE_TX_USECS, + coal->tx_coalesce_usecs, supported) || + coalesce_put_u32(skb, ETHTOOL_A_COALESCE_TX_MAX_FRAMES, + coal->tx_max_coalesced_frames, supported) || + coalesce_put_u32(skb, ETHTOOL_A_COALESCE_TX_USECS_IRQ, + coal->tx_coalesce_usecs_irq, supported) || + coalesce_put_u32(skb, ETHTOOL_A_COALESCE_TX_MAX_FRAMES_IRQ, + coal->tx_max_coalesced_frames_irq, supported) || + coalesce_put_u32(skb, ETHTOOL_A_COALESCE_STATS_BLOCK_USECS, + coal->stats_block_coalesce_usecs, supported) || + coalesce_put_bool(skb, ETHTOOL_A_COALESCE_USE_ADAPTIVE_RX, + coal->use_adaptive_rx_coalesce, supported) || + coalesce_put_bool(skb, ETHTOOL_A_COALESCE_USE_ADAPTIVE_TX, + coal->use_adaptive_tx_coalesce, supported) || + coalesce_put_u32(skb, ETHTOOL_A_COALESCE_PKT_RATE_LOW, + coal->pkt_rate_low, supported) || + coalesce_put_u32(skb, ETHTOOL_A_COALESCE_RX_USECS_LOW, + coal->rx_coalesce_usecs_low, supported) || + coalesce_put_u32(skb, ETHTOOL_A_COALESCE_RX_MAX_FRAMES_LOW, + coal->rx_max_coalesced_frames_low, supported) || + coalesce_put_u32(skb, ETHTOOL_A_COALESCE_TX_USECS_LOW, + coal->tx_coalesce_usecs_low, supported) || + coalesce_put_u32(skb, ETHTOOL_A_COALESCE_TX_MAX_FRAMES_LOW, + coal->tx_max_coalesced_frames_low, supported) || + coalesce_put_u32(skb, ETHTOOL_A_COALESCE_PKT_RATE_HIGH, + coal->pkt_rate_high, supported) || + coalesce_put_u32(skb, ETHTOOL_A_COALESCE_RX_USECS_HIGH, + coal->rx_coalesce_usecs_high, supported) || + coalesce_put_u32(skb, 
ETHTOOL_A_COALESCE_RX_MAX_FRAMES_HIGH, + coal->rx_max_coalesced_frames_high, supported) || + coalesce_put_u32(skb, ETHTOOL_A_COALESCE_TX_USECS_HIGH, + coal->tx_coalesce_usecs_high, supported) || + coalesce_put_u32(skb, ETHTOOL_A_COALESCE_TX_MAX_FRAMES_HIGH, + coal->tx_max_coalesced_frames_high, supported) || + coalesce_put_u32(skb, ETHTOOL_A_COALESCE_RATE_SAMPLE_INTERVAL, + coal->rate_sample_interval, supported) || + coalesce_put_bool(skb, ETHTOOL_A_COALESCE_USE_CQE_MODE_TX, + kcoal->use_cqe_mode_tx, supported) || + coalesce_put_bool(skb, ETHTOOL_A_COALESCE_USE_CQE_MODE_RX, + kcoal->use_cqe_mode_rx, supported) || + coalesce_put_u32(skb, ETHTOOL_A_COALESCE_TX_AGGR_MAX_BYTES, + kcoal->tx_aggr_max_bytes, supported) || + coalesce_put_u32(skb, ETHTOOL_A_COALESCE_TX_AGGR_MAX_FRAMES, + kcoal->tx_aggr_max_frames, supported) || + coalesce_put_u32(skb, ETHTOOL_A_COALESCE_TX_AGGR_TIME_USECS, + kcoal->tx_aggr_time_usecs, supported)) + return -EMSGSIZE; + + if (!req_base->dev || !req_base->dev->irq_moder) + return 0; + + moder = req_base->dev->irq_moder; + rcu_read_lock(); + if (moder->profile_flags & DIM_PROFILE_RX) { + ret = coalesce_put_profile(skb, ETHTOOL_A_COALESCE_RX_PROFILE, + rcu_dereference(moder->rx_profile), + moder->coal_flags); + if (ret) + goto out; + } + + if (moder->profile_flags & DIM_PROFILE_TX) + ret = coalesce_put_profile(skb, ETHTOOL_A_COALESCE_TX_PROFILE, + rcu_dereference(moder->tx_profile), + moder->coal_flags); + +out: + rcu_read_unlock(); + return ret; +} + +/* COALESCE_SET */ + +static const struct nla_policy coalesce_irq_moderation_policy[] = { + [ETHTOOL_A_IRQ_MODERATION_USEC] = { .type = NLA_U32 }, + [ETHTOOL_A_IRQ_MODERATION_PKTS] = { .type = NLA_U32 }, + [ETHTOOL_A_IRQ_MODERATION_COMPS] = { .type = NLA_U32 }, +}; + +static const struct nla_policy coalesce_profile_policy[] = { + [ETHTOOL_A_PROFILE_IRQ_MODERATION] = + NLA_POLICY_NESTED(coalesce_irq_moderation_policy), +}; + +const struct nla_policy ethnl_coalesce_set_policy[] = { + [ETHTOOL_A_COALESCE_HEADER] = + NLA_POLICY_NESTED(ethnl_header_policy), + [ETHTOOL_A_COALESCE_RX_USECS] = { .type = NLA_U32 }, + [ETHTOOL_A_COALESCE_RX_MAX_FRAMES] = { .type = NLA_U32 }, + [ETHTOOL_A_COALESCE_RX_USECS_IRQ] = { .type = NLA_U32 }, + [ETHTOOL_A_COALESCE_RX_MAX_FRAMES_IRQ] = { .type = NLA_U32 }, + [ETHTOOL_A_COALESCE_TX_USECS] = { .type = NLA_U32 }, + [ETHTOOL_A_COALESCE_TX_MAX_FRAMES] = { .type = NLA_U32 }, + [ETHTOOL_A_COALESCE_TX_USECS_IRQ] = { .type = NLA_U32 }, + [ETHTOOL_A_COALESCE_TX_MAX_FRAMES_IRQ] = { .type = NLA_U32 }, + [ETHTOOL_A_COALESCE_STATS_BLOCK_USECS] = { .type = NLA_U32 }, + [ETHTOOL_A_COALESCE_USE_ADAPTIVE_RX] = { .type = NLA_U8 }, + [ETHTOOL_A_COALESCE_USE_ADAPTIVE_TX] = { .type = NLA_U8 }, + [ETHTOOL_A_COALESCE_PKT_RATE_LOW] = { .type = NLA_U32 }, + [ETHTOOL_A_COALESCE_RX_USECS_LOW] = { .type = NLA_U32 }, + [ETHTOOL_A_COALESCE_RX_MAX_FRAMES_LOW] = { .type = NLA_U32 }, + [ETHTOOL_A_COALESCE_TX_USECS_LOW] = { .type = NLA_U32 }, + [ETHTOOL_A_COALESCE_TX_MAX_FRAMES_LOW] = { .type = NLA_U32 }, + [ETHTOOL_A_COALESCE_PKT_RATE_HIGH] = { .type = NLA_U32 }, + [ETHTOOL_A_COALESCE_RX_USECS_HIGH] = { .type = NLA_U32 }, + [ETHTOOL_A_COALESCE_RX_MAX_FRAMES_HIGH] = { .type = NLA_U32 }, + [ETHTOOL_A_COALESCE_TX_USECS_HIGH] = { .type = NLA_U32 }, + [ETHTOOL_A_COALESCE_TX_MAX_FRAMES_HIGH] = { .type = NLA_U32 }, + [ETHTOOL_A_COALESCE_RATE_SAMPLE_INTERVAL] = { .type = NLA_U32 }, + [ETHTOOL_A_COALESCE_USE_CQE_MODE_TX] = NLA_POLICY_MAX(NLA_U8, 1), + [ETHTOOL_A_COALESCE_USE_CQE_MODE_RX] = NLA_POLICY_MAX(NLA_U8, 1), + 
[ETHTOOL_A_COALESCE_TX_AGGR_MAX_BYTES] = { .type = NLA_U32 }, + [ETHTOOL_A_COALESCE_TX_AGGR_MAX_FRAMES] = { .type = NLA_U32 }, + [ETHTOOL_A_COALESCE_TX_AGGR_TIME_USECS] = { .type = NLA_U32 }, + [ETHTOOL_A_COALESCE_RX_PROFILE] = + NLA_POLICY_NESTED(coalesce_profile_policy), + [ETHTOOL_A_COALESCE_TX_PROFILE] = + NLA_POLICY_NESTED(coalesce_profile_policy), +}; + +static int +ethnl_set_coalesce_validate(struct ethnl_req_info *req_info, + struct genl_info *info) +{ + const struct ethtool_ops *ops = req_info->dev->ethtool_ops; + struct dim_irq_moder *irq_moder = req_info->dev->irq_moder; + struct nlattr **tb = info->attrs; + u32 supported_params; + u16 a; + + if (!ops->get_coalesce || !ops->set_coalesce) + return -EOPNOTSUPP; + + /* make sure that only supported parameters are present */ + supported_params = ops->supported_coalesce_params; + if (irq_moder && irq_moder->profile_flags & DIM_PROFILE_RX) + supported_params |= ETHTOOL_COALESCE_RX_PROFILE; + + if (irq_moder && irq_moder->profile_flags & DIM_PROFILE_TX) + supported_params |= ETHTOOL_COALESCE_TX_PROFILE; + + for (a = ETHTOOL_A_COALESCE_RX_USECS; a < __ETHTOOL_A_COALESCE_CNT; a++) + if (tb[a] && !(supported_params & attr_to_mask(a))) { + NL_SET_ERR_MSG_ATTR(info->extack, tb[a], + "cannot modify an unsupported parameter"); + return -EINVAL; + } + + return 1; +} + +/** + * ethnl_update_irq_moder - update a specific field in the given profile + * @irq_moder: place that collects dim related information + * @irq_field: field in profile to modify + * @attr_type: attr type ETHTOOL_A_IRQ_MODERATION_* + * @tb: netlink attribute with new values or NULL + * @coal_bit: DIM_COALESCE_* bit from coal_flags + * @mod: pointer to bool for modification tracking + * @extack: netlink extended ack + * + * Return: 0 on success or a negative error code. + */ +static int ethnl_update_irq_moder(struct dim_irq_moder *irq_moder, + u16 *irq_field, u16 attr_type, + struct nlattr **tb, + u8 coal_bit, bool *mod, + struct netlink_ext_ack *extack) +{ + int ret = 0; + u32 val; + + if (!tb[attr_type]) + return 0; + + if (irq_moder->coal_flags & coal_bit) { + val = nla_get_u32(tb[attr_type]); + if (*irq_field == val) + return 0; + + *irq_field = val; + *mod = true; + } else { + NL_SET_BAD_ATTR(extack, tb[attr_type]); + ret = -EOPNOTSUPP; + } + + return ret; +} + +/** + * ethnl_update_profile - get a profile nest with child nests from userspace. + * @dev: netdevice to update the profile + * @dst: profile obtained from the driver and modified by ethnl_update_profile. + * @nests: nest attr ETHTOOL_A_COALESCE_*X_PROFILE to set profile. + * @mod: pointer to bool for modification tracking + * @extack: Netlink extended ack + * + * Layout of nests: + * Nested ETHTOOL_A_COALESCE_*X_PROFILE attr + * Nested ETHTOOL_A_PROFILE_IRQ_MODERATION attr + * ETHTOOL_A_IRQ_MODERATION_USEC attr + * ETHTOOL_A_IRQ_MODERATION_PKTS attr + * ETHTOOL_A_IRQ_MODERATION_COMPS attr + * ... + * Nested ETHTOOL_A_PROFILE_IRQ_MODERATION attr + * ETHTOOL_A_IRQ_MODERATION_USEC attr + * ETHTOOL_A_IRQ_MODERATION_PKTS attr + * ETHTOOL_A_IRQ_MODERATION_COMPS attr + * + * Return: 0 on success or a negative error code.
+ */ +static int ethnl_update_profile(struct net_device *dev, + struct dim_cq_moder __rcu **dst, + const struct nlattr *nests, + bool *mod, + struct netlink_ext_ack *extack) +{ + int len_irq_moder = ARRAY_SIZE(coalesce_irq_moderation_policy); + struct nlattr *tb[ARRAY_SIZE(coalesce_irq_moderation_policy)]; + struct dim_irq_moder *irq_moder = dev->irq_moder; + struct dim_cq_moder *new_profile, *old_profile; + int ret, rem, i = 0, len; + struct nlattr *nest; + + if (!nests) + return 0; + + if (!*dst) + return -EOPNOTSUPP; + + old_profile = rtnl_dereference(*dst); + len = NET_DIM_PARAMS_NUM_PROFILES * sizeof(*old_profile); + new_profile = kmemdup(old_profile, len, GFP_KERNEL); + if (!new_profile) + return -ENOMEM; + + nla_for_each_nested_type(nest, ETHTOOL_A_PROFILE_IRQ_MODERATION, + nests, rem) { + ret = nla_parse_nested(tb, len_irq_moder - 1, nest, + coalesce_irq_moderation_policy, + extack); + if (ret) + goto err_out; + + ret = ethnl_update_irq_moder(irq_moder, &new_profile[i].usec, + ETHTOOL_A_IRQ_MODERATION_USEC, + tb, DIM_COALESCE_USEC, + mod, extack); + if (ret) + goto err_out; + + ret = ethnl_update_irq_moder(irq_moder, &new_profile[i].pkts, + ETHTOOL_A_IRQ_MODERATION_PKTS, + tb, DIM_COALESCE_PKTS, + mod, extack); + if (ret) + goto err_out; + + ret = ethnl_update_irq_moder(irq_moder, &new_profile[i].comps, + ETHTOOL_A_IRQ_MODERATION_COMPS, + tb, DIM_COALESCE_COMPS, + mod, extack); + if (ret) + goto err_out; + + i++; + } + + /* After the profile is modified, dim itself is a dynamic + * mechanism and will quickly fit to the appropriate + * coalescing parameters according to the new profile. + */ + rcu_assign_pointer(*dst, new_profile); + kfree_rcu(old_profile, rcu); + + return 0; + +err_out: + kfree(new_profile); + return ret; +} + +static int +__ethnl_set_coalesce(struct ethnl_req_info *req_info, struct genl_info *info, + bool *dual_change) +{ + struct kernel_ethtool_coalesce kernel_coalesce = {}; + struct net_device *dev = req_info->dev; + struct ethtool_coalesce coalesce = {}; + bool mod_mode = false, mod = false; + struct nlattr **tb = info->attrs; + int ret; + + ret = dev->ethtool_ops->get_coalesce(dev, &coalesce, &kernel_coalesce, + info->extack); + if (ret < 0) + return ret; + + /* Update values */ + ethnl_update_u32(&coalesce.rx_coalesce_usecs, + tb[ETHTOOL_A_COALESCE_RX_USECS], &mod); + ethnl_update_u32(&coalesce.rx_max_coalesced_frames, + tb[ETHTOOL_A_COALESCE_RX_MAX_FRAMES], &mod); + ethnl_update_u32(&coalesce.rx_coalesce_usecs_irq, + tb[ETHTOOL_A_COALESCE_RX_USECS_IRQ], &mod); + ethnl_update_u32(&coalesce.rx_max_coalesced_frames_irq, + tb[ETHTOOL_A_COALESCE_RX_MAX_FRAMES_IRQ], &mod); + ethnl_update_u32(&coalesce.tx_coalesce_usecs, + tb[ETHTOOL_A_COALESCE_TX_USECS], &mod); + ethnl_update_u32(&coalesce.tx_max_coalesced_frames, + tb[ETHTOOL_A_COALESCE_TX_MAX_FRAMES], &mod); + ethnl_update_u32(&coalesce.tx_coalesce_usecs_irq, + tb[ETHTOOL_A_COALESCE_TX_USECS_IRQ], &mod); + ethnl_update_u32(&coalesce.tx_max_coalesced_frames_irq, + tb[ETHTOOL_A_COALESCE_TX_MAX_FRAMES_IRQ], &mod); + ethnl_update_u32(&coalesce.stats_block_coalesce_usecs, + tb[ETHTOOL_A_COALESCE_STATS_BLOCK_USECS], &mod); + ethnl_update_u32(&coalesce.pkt_rate_low, + tb[ETHTOOL_A_COALESCE_PKT_RATE_LOW], &mod); + ethnl_update_u32(&coalesce.rx_coalesce_usecs_low, + tb[ETHTOOL_A_COALESCE_RX_USECS_LOW], &mod); + ethnl_update_u32(&coalesce.rx_max_coalesced_frames_low, + tb[ETHTOOL_A_COALESCE_RX_MAX_FRAMES_LOW], &mod); + ethnl_update_u32(&coalesce.tx_coalesce_usecs_low, + tb[ETHTOOL_A_COALESCE_TX_USECS_LOW], &mod); + 
ethnl_update_u32(&coalesce.tx_max_coalesced_frames_low, + tb[ETHTOOL_A_COALESCE_TX_MAX_FRAMES_LOW], &mod); + ethnl_update_u32(&coalesce.pkt_rate_high, + tb[ETHTOOL_A_COALESCE_PKT_RATE_HIGH], &mod); + ethnl_update_u32(&coalesce.rx_coalesce_usecs_high, + tb[ETHTOOL_A_COALESCE_RX_USECS_HIGH], &mod); + ethnl_update_u32(&coalesce.rx_max_coalesced_frames_high, + tb[ETHTOOL_A_COALESCE_RX_MAX_FRAMES_HIGH], &mod); + ethnl_update_u32(&coalesce.tx_coalesce_usecs_high, + tb[ETHTOOL_A_COALESCE_TX_USECS_HIGH], &mod); + ethnl_update_u32(&coalesce.tx_max_coalesced_frames_high, + tb[ETHTOOL_A_COALESCE_TX_MAX_FRAMES_HIGH], &mod); + ethnl_update_u32(&coalesce.rate_sample_interval, + tb[ETHTOOL_A_COALESCE_RATE_SAMPLE_INTERVAL], &mod); + ethnl_update_u32(&kernel_coalesce.tx_aggr_max_bytes, + tb[ETHTOOL_A_COALESCE_TX_AGGR_MAX_BYTES], &mod); + ethnl_update_u32(&kernel_coalesce.tx_aggr_max_frames, + tb[ETHTOOL_A_COALESCE_TX_AGGR_MAX_FRAMES], &mod); + ethnl_update_u32(&kernel_coalesce.tx_aggr_time_usecs, + tb[ETHTOOL_A_COALESCE_TX_AGGR_TIME_USECS], &mod); + + if (dev->irq_moder && dev->irq_moder->profile_flags & DIM_PROFILE_RX) { + ret = ethnl_update_profile(dev, &dev->irq_moder->rx_profile, + tb[ETHTOOL_A_COALESCE_RX_PROFILE], + &mod, info->extack); + if (ret < 0) + return ret; + } + + if (dev->irq_moder && dev->irq_moder->profile_flags & DIM_PROFILE_TX) { + ret = ethnl_update_profile(dev, &dev->irq_moder->tx_profile, + tb[ETHTOOL_A_COALESCE_TX_PROFILE], + &mod, info->extack); + if (ret < 0) + return ret; + } + + /* Update operation modes */ + ethnl_update_bool32(&coalesce.use_adaptive_rx_coalesce, + tb[ETHTOOL_A_COALESCE_USE_ADAPTIVE_RX], &mod_mode); + ethnl_update_bool32(&coalesce.use_adaptive_tx_coalesce, + tb[ETHTOOL_A_COALESCE_USE_ADAPTIVE_TX], &mod_mode); + ethnl_update_u8(&kernel_coalesce.use_cqe_mode_tx, + tb[ETHTOOL_A_COALESCE_USE_CQE_MODE_TX], &mod_mode); + ethnl_update_u8(&kernel_coalesce.use_cqe_mode_rx, + tb[ETHTOOL_A_COALESCE_USE_CQE_MODE_RX], &mod_mode); + + *dual_change = mod && mod_mode; + if (!mod && !mod_mode) + return 0; + + ret = dev->ethtool_ops->set_coalesce(dev, &coalesce, &kernel_coalesce, + info->extack); + return ret < 0 ? ret : 1; +} + +static int +ethnl_set_coalesce(struct ethnl_req_info *req_info, struct genl_info *info) +{ + bool dual_change; + int err, ret; + + /* SET_COALESCE may change operation mode and parameters in one call. + * Changing operation mode may cause the driver to reset the parameter + * values, and therefore ignore user input (the driver does not know which + * parameters come from the user and which are echoed back from ->get). + * To avoid complicating drivers when the user changes both the mode + * and the parameters at once, call the driver twice.
+ */ + err = __ethnl_set_coalesce(req_info, info, &dual_change); + if (err < 0) + return err; + ret = err; + + if (ret && dual_change) { + err = __ethnl_set_coalesce(req_info, info, &dual_change); + if (err < 0) + return err; + } + return ret; +} + +const struct ethnl_request_ops ethnl_coalesce_request_ops = { + .request_cmd = ETHTOOL_MSG_COALESCE_GET, + .reply_cmd = ETHTOOL_MSG_COALESCE_GET_REPLY, + .hdr_attr = ETHTOOL_A_COALESCE_HEADER, + .req_info_size = sizeof(struct coalesce_req_info), + .reply_data_size = sizeof(struct coalesce_reply_data), + + .prepare_data = coalesce_prepare_data, + .reply_size = coalesce_reply_size, + .fill_reply = coalesce_fill_reply, + + .set_validate = ethnl_set_coalesce_validate, + .set = ethnl_set_coalesce, + .set_ntf_cmd = ETHTOOL_MSG_COALESCE_NTF, +}; diff --git a/net/ethtool/common.c b/net/ethtool/common.c new file mode 100644 index 000000000000..369c05cf8163 --- /dev/null +++ b/net/ethtool/common.c @@ -0,0 +1,1169 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include <linux/ethtool_netlink.h> +#include <linux/net_tstamp.h> +#include <linux/phy.h> +#include <linux/rtnetlink.h> +#include <linux/ptp_clock_kernel.h> +#include <linux/phy_link_topology.h> +#include <net/netdev_queues.h> + +#include "netlink.h" +#include "common.h" +#include "../core/dev.h" + + +const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN] = { + [NETIF_F_SG_BIT] = "tx-scatter-gather", + [NETIF_F_IP_CSUM_BIT] = "tx-checksum-ipv4", + [NETIF_F_HW_CSUM_BIT] = "tx-checksum-ip-generic", + [NETIF_F_IPV6_CSUM_BIT] = "tx-checksum-ipv6", + [NETIF_F_HIGHDMA_BIT] = "highdma", + [NETIF_F_FRAGLIST_BIT] = "tx-scatter-gather-fraglist", + [NETIF_F_HW_VLAN_CTAG_TX_BIT] = "tx-vlan-hw-insert", + + [NETIF_F_HW_VLAN_CTAG_RX_BIT] = "rx-vlan-hw-parse", + [NETIF_F_HW_VLAN_CTAG_FILTER_BIT] = "rx-vlan-filter", + [NETIF_F_HW_VLAN_STAG_TX_BIT] = "tx-vlan-stag-hw-insert", + [NETIF_F_HW_VLAN_STAG_RX_BIT] = "rx-vlan-stag-hw-parse", + [NETIF_F_HW_VLAN_STAG_FILTER_BIT] = "rx-vlan-stag-filter", + [NETIF_F_VLAN_CHALLENGED_BIT] = "vlan-challenged", + [NETIF_F_GSO_BIT] = "tx-generic-segmentation", + [NETIF_F_GRO_BIT] = "rx-gro", + [NETIF_F_GRO_HW_BIT] = "rx-gro-hw", + [NETIF_F_LRO_BIT] = "rx-lro", + + [NETIF_F_TSO_BIT] = "tx-tcp-segmentation", + [NETIF_F_GSO_ROBUST_BIT] = "tx-gso-robust", + [NETIF_F_TSO_ECN_BIT] = "tx-tcp-ecn-segmentation", + [NETIF_F_GSO_ACCECN_BIT] = "tx-tcp-accecn-segmentation", + [NETIF_F_TSO_MANGLEID_BIT] = "tx-tcp-mangleid-segmentation", + [NETIF_F_TSO6_BIT] = "tx-tcp6-segmentation", + [NETIF_F_FSO_BIT] = "tx-fcoe-segmentation", + [NETIF_F_GSO_GRE_BIT] = "tx-gre-segmentation", + [NETIF_F_GSO_GRE_CSUM_BIT] = "tx-gre-csum-segmentation", + [NETIF_F_GSO_IPXIP4_BIT] = "tx-ipxip4-segmentation", + [NETIF_F_GSO_IPXIP6_BIT] = "tx-ipxip6-segmentation", + [NETIF_F_GSO_UDP_TUNNEL_BIT] = "tx-udp_tnl-segmentation", + [NETIF_F_GSO_UDP_TUNNEL_CSUM_BIT] = "tx-udp_tnl-csum-segmentation", + [NETIF_F_GSO_PARTIAL_BIT] = "tx-gso-partial", + [NETIF_F_GSO_TUNNEL_REMCSUM_BIT] = "tx-tunnel-remcsum-segmentation", + [NETIF_F_GSO_SCTP_BIT] = "tx-sctp-segmentation", + [NETIF_F_GSO_ESP_BIT] = "tx-esp-segmentation", + [NETIF_F_GSO_UDP_L4_BIT] = "tx-udp-segmentation", + [NETIF_F_GSO_FRAGLIST_BIT] = "tx-gso-list", + + [NETIF_F_FCOE_CRC_BIT] = "tx-checksum-fcoe-crc", + [NETIF_F_SCTP_CRC_BIT] = "tx-checksum-sctp", + [NETIF_F_NTUPLE_BIT] = "rx-ntuple-filter", + [NETIF_F_RXHASH_BIT] = "rx-hashing", + [NETIF_F_RXCSUM_BIT] = "rx-checksum", + [NETIF_F_NOCACHE_COPY_BIT] = "tx-nocache-copy", + [NETIF_F_LOOPBACK_BIT] = 
"loopback", + [NETIF_F_RXFCS_BIT] = "rx-fcs", + [NETIF_F_RXALL_BIT] = "rx-all", + [NETIF_F_HW_L2FW_DOFFLOAD_BIT] = "l2-fwd-offload", + [NETIF_F_HW_TC_BIT] = "hw-tc-offload", + [NETIF_F_HW_ESP_BIT] = "esp-hw-offload", + [NETIF_F_HW_ESP_TX_CSUM_BIT] = "esp-tx-csum-hw-offload", + [NETIF_F_RX_UDP_TUNNEL_PORT_BIT] = "rx-udp_tunnel-port-offload", + [NETIF_F_HW_TLS_RECORD_BIT] = "tls-hw-record", + [NETIF_F_HW_TLS_TX_BIT] = "tls-hw-tx-offload", + [NETIF_F_HW_TLS_RX_BIT] = "tls-hw-rx-offload", + [NETIF_F_GRO_FRAGLIST_BIT] = "rx-gro-list", + [NETIF_F_HW_MACSEC_BIT] = "macsec-hw-offload", + [NETIF_F_GRO_UDP_FWD_BIT] = "rx-udp-gro-forwarding", + [NETIF_F_HW_HSR_TAG_INS_BIT] = "hsr-tag-ins-offload", + [NETIF_F_HW_HSR_TAG_RM_BIT] = "hsr-tag-rm-offload", + [NETIF_F_HW_HSR_FWD_BIT] = "hsr-fwd-offload", + [NETIF_F_HW_HSR_DUP_BIT] = "hsr-dup-offload", +}; + +const char +rss_hash_func_strings[ETH_RSS_HASH_FUNCS_COUNT][ETH_GSTRING_LEN] = { + [ETH_RSS_HASH_TOP_BIT] = "toeplitz", + [ETH_RSS_HASH_XOR_BIT] = "xor", + [ETH_RSS_HASH_CRC32_BIT] = "crc32", +}; + +const char +tunable_strings[__ETHTOOL_TUNABLE_COUNT][ETH_GSTRING_LEN] = { + [ETHTOOL_ID_UNSPEC] = "Unspec", + [ETHTOOL_RX_COPYBREAK] = "rx-copybreak", + [ETHTOOL_TX_COPYBREAK] = "tx-copybreak", + [ETHTOOL_PFC_PREVENTION_TOUT] = "pfc-prevention-tout", + [ETHTOOL_TX_COPYBREAK_BUF_SIZE] = "tx-copybreak-buf-size", +}; + +const char +phy_tunable_strings[__ETHTOOL_PHY_TUNABLE_COUNT][ETH_GSTRING_LEN] = { + [ETHTOOL_ID_UNSPEC] = "Unspec", + [ETHTOOL_PHY_DOWNSHIFT] = "phy-downshift", + [ETHTOOL_PHY_FAST_LINK_DOWN] = "phy-fast-link-down", + [ETHTOOL_PHY_EDPD] = "phy-energy-detect-power-down", +}; + +#define __LINK_MODE_NAME(speed, type, duplex) \ + #speed "base" #type "/" #duplex +#define __DEFINE_LINK_MODE_NAME(speed, type, duplex) \ + [ETHTOOL_LINK_MODE(speed, type, duplex)] = \ + __LINK_MODE_NAME(speed, type, duplex) +#define __DEFINE_SPECIAL_MODE_NAME(_mode, _name) \ + [ETHTOOL_LINK_MODE_ ## _mode ## _BIT] = _name + +const char link_mode_names[][ETH_GSTRING_LEN] = { + __DEFINE_LINK_MODE_NAME(10, T, Half), + __DEFINE_LINK_MODE_NAME(10, T, Full), + __DEFINE_LINK_MODE_NAME(100, T, Half), + __DEFINE_LINK_MODE_NAME(100, T, Full), + __DEFINE_LINK_MODE_NAME(1000, T, Half), + __DEFINE_LINK_MODE_NAME(1000, T, Full), + __DEFINE_SPECIAL_MODE_NAME(Autoneg, "Autoneg"), + __DEFINE_SPECIAL_MODE_NAME(TP, "TP"), + __DEFINE_SPECIAL_MODE_NAME(AUI, "AUI"), + __DEFINE_SPECIAL_MODE_NAME(MII, "MII"), + __DEFINE_SPECIAL_MODE_NAME(FIBRE, "FIBRE"), + __DEFINE_SPECIAL_MODE_NAME(BNC, "BNC"), + __DEFINE_LINK_MODE_NAME(10000, T, Full), + __DEFINE_SPECIAL_MODE_NAME(Pause, "Pause"), + __DEFINE_SPECIAL_MODE_NAME(Asym_Pause, "Asym_Pause"), + __DEFINE_LINK_MODE_NAME(2500, X, Full), + __DEFINE_SPECIAL_MODE_NAME(Backplane, "Backplane"), + __DEFINE_LINK_MODE_NAME(1000, KX, Full), + __DEFINE_LINK_MODE_NAME(10000, KX4, Full), + __DEFINE_LINK_MODE_NAME(10000, KR, Full), + __DEFINE_SPECIAL_MODE_NAME(10000baseR_FEC, "10000baseR_FEC"), + __DEFINE_LINK_MODE_NAME(20000, MLD2, Full), + __DEFINE_LINK_MODE_NAME(20000, KR2, Full), + __DEFINE_LINK_MODE_NAME(40000, KR4, Full), + __DEFINE_LINK_MODE_NAME(40000, CR4, Full), + __DEFINE_LINK_MODE_NAME(40000, SR4, Full), + __DEFINE_LINK_MODE_NAME(40000, LR4, Full), + __DEFINE_LINK_MODE_NAME(56000, KR4, Full), + __DEFINE_LINK_MODE_NAME(56000, CR4, Full), + __DEFINE_LINK_MODE_NAME(56000, SR4, Full), + __DEFINE_LINK_MODE_NAME(56000, LR4, Full), + __DEFINE_LINK_MODE_NAME(25000, CR, Full), + __DEFINE_LINK_MODE_NAME(25000, KR, Full), + __DEFINE_LINK_MODE_NAME(25000, SR, Full), 
+ __DEFINE_LINK_MODE_NAME(50000, CR2, Full), + __DEFINE_LINK_MODE_NAME(50000, KR2, Full), + __DEFINE_LINK_MODE_NAME(100000, KR4, Full), + __DEFINE_LINK_MODE_NAME(100000, SR4, Full), + __DEFINE_LINK_MODE_NAME(100000, CR4, Full), + __DEFINE_LINK_MODE_NAME(100000, LR4_ER4, Full), + __DEFINE_LINK_MODE_NAME(50000, SR2, Full), + __DEFINE_LINK_MODE_NAME(1000, X, Full), + __DEFINE_LINK_MODE_NAME(10000, CR, Full), + __DEFINE_LINK_MODE_NAME(10000, SR, Full), + __DEFINE_LINK_MODE_NAME(10000, LR, Full), + __DEFINE_LINK_MODE_NAME(10000, LRM, Full), + __DEFINE_LINK_MODE_NAME(10000, ER, Full), + __DEFINE_LINK_MODE_NAME(2500, T, Full), + __DEFINE_LINK_MODE_NAME(5000, T, Full), + __DEFINE_SPECIAL_MODE_NAME(FEC_NONE, "None"), + __DEFINE_SPECIAL_MODE_NAME(FEC_RS, "RS"), + __DEFINE_SPECIAL_MODE_NAME(FEC_BASER, "BASER"), + __DEFINE_LINK_MODE_NAME(50000, KR, Full), + __DEFINE_LINK_MODE_NAME(50000, SR, Full), + __DEFINE_LINK_MODE_NAME(50000, CR, Full), + __DEFINE_LINK_MODE_NAME(50000, LR_ER_FR, Full), + __DEFINE_LINK_MODE_NAME(50000, DR, Full), + __DEFINE_LINK_MODE_NAME(100000, KR2, Full), + __DEFINE_LINK_MODE_NAME(100000, SR2, Full), + __DEFINE_LINK_MODE_NAME(100000, CR2, Full), + __DEFINE_LINK_MODE_NAME(100000, LR2_ER2_FR2, Full), + __DEFINE_LINK_MODE_NAME(100000, DR2, Full), + __DEFINE_LINK_MODE_NAME(200000, KR4, Full), + __DEFINE_LINK_MODE_NAME(200000, SR4, Full), + __DEFINE_LINK_MODE_NAME(200000, LR4_ER4_FR4, Full), + __DEFINE_LINK_MODE_NAME(200000, DR4, Full), + __DEFINE_LINK_MODE_NAME(200000, CR4, Full), + __DEFINE_LINK_MODE_NAME(100, T1, Full), + __DEFINE_LINK_MODE_NAME(1000, T1, Full), + __DEFINE_LINK_MODE_NAME(400000, KR8, Full), + __DEFINE_LINK_MODE_NAME(400000, SR8, Full), + __DEFINE_LINK_MODE_NAME(400000, LR8_ER8_FR8, Full), + __DEFINE_LINK_MODE_NAME(400000, DR8, Full), + __DEFINE_LINK_MODE_NAME(400000, CR8, Full), + __DEFINE_SPECIAL_MODE_NAME(FEC_LLRS, "LLRS"), + __DEFINE_LINK_MODE_NAME(100000, KR, Full), + __DEFINE_LINK_MODE_NAME(100000, SR, Full), + __DEFINE_LINK_MODE_NAME(100000, LR_ER_FR, Full), + __DEFINE_LINK_MODE_NAME(100000, DR, Full), + __DEFINE_LINK_MODE_NAME(100000, CR, Full), + __DEFINE_LINK_MODE_NAME(200000, KR2, Full), + __DEFINE_LINK_MODE_NAME(200000, SR2, Full), + __DEFINE_LINK_MODE_NAME(200000, LR2_ER2_FR2, Full), + __DEFINE_LINK_MODE_NAME(200000, DR2, Full), + __DEFINE_LINK_MODE_NAME(200000, CR2, Full), + __DEFINE_LINK_MODE_NAME(400000, KR4, Full), + __DEFINE_LINK_MODE_NAME(400000, SR4, Full), + __DEFINE_LINK_MODE_NAME(400000, LR4_ER4_FR4, Full), + __DEFINE_LINK_MODE_NAME(400000, DR4, Full), + __DEFINE_LINK_MODE_NAME(400000, CR4, Full), + __DEFINE_LINK_MODE_NAME(100, FX, Half), + __DEFINE_LINK_MODE_NAME(100, FX, Full), + __DEFINE_LINK_MODE_NAME(10, T1L, Full), + __DEFINE_LINK_MODE_NAME(800000, CR8, Full), + __DEFINE_LINK_MODE_NAME(800000, KR8, Full), + __DEFINE_LINK_MODE_NAME(800000, DR8, Full), + __DEFINE_LINK_MODE_NAME(800000, DR8_2, Full), + __DEFINE_LINK_MODE_NAME(800000, SR8, Full), + __DEFINE_LINK_MODE_NAME(800000, VR8, Full), + __DEFINE_LINK_MODE_NAME(10, T1S, Full), + __DEFINE_LINK_MODE_NAME(10, T1S, Half), + __DEFINE_LINK_MODE_NAME(10, T1S_P2MP, Half), + __DEFINE_LINK_MODE_NAME(10, T1BRR, Full), + __DEFINE_LINK_MODE_NAME(200000, CR, Full), + __DEFINE_LINK_MODE_NAME(200000, KR, Full), + __DEFINE_LINK_MODE_NAME(200000, DR, Full), + __DEFINE_LINK_MODE_NAME(200000, DR_2, Full), + __DEFINE_LINK_MODE_NAME(200000, SR, Full), + __DEFINE_LINK_MODE_NAME(200000, VR, Full), + __DEFINE_LINK_MODE_NAME(400000, CR2, Full), + __DEFINE_LINK_MODE_NAME(400000, KR2, Full), + 
__DEFINE_LINK_MODE_NAME(400000, DR2, Full), + __DEFINE_LINK_MODE_NAME(400000, DR2_2, Full), + __DEFINE_LINK_MODE_NAME(400000, SR2, Full), + __DEFINE_LINK_MODE_NAME(400000, VR2, Full), + __DEFINE_LINK_MODE_NAME(800000, CR4, Full), + __DEFINE_LINK_MODE_NAME(800000, KR4, Full), + __DEFINE_LINK_MODE_NAME(800000, DR4, Full), + __DEFINE_LINK_MODE_NAME(800000, DR4_2, Full), + __DEFINE_LINK_MODE_NAME(800000, SR4, Full), + __DEFINE_LINK_MODE_NAME(800000, VR4, Full), + __DEFINE_LINK_MODE_NAME(1600000, CR8, Full), + __DEFINE_LINK_MODE_NAME(1600000, KR8, Full), + __DEFINE_LINK_MODE_NAME(1600000, DR8, Full), + __DEFINE_LINK_MODE_NAME(1600000, DR8_2, Full), +}; +static_assert(ARRAY_SIZE(link_mode_names) == __ETHTOOL_LINK_MODE_MASK_NBITS); + +#define __LINK_MODE_LANES_CR 1 +#define __LINK_MODE_LANES_CR2 2 +#define __LINK_MODE_LANES_CR4 4 +#define __LINK_MODE_LANES_CR8 8 +#define __LINK_MODE_LANES_DR 1 +#define __LINK_MODE_LANES_DR_2 1 +#define __LINK_MODE_LANES_DR2 2 +#define __LINK_MODE_LANES_DR2_2 2 +#define __LINK_MODE_LANES_DR4 4 +#define __LINK_MODE_LANES_DR4_2 4 +#define __LINK_MODE_LANES_DR8 8 +#define __LINK_MODE_LANES_KR 1 +#define __LINK_MODE_LANES_KR2 2 +#define __LINK_MODE_LANES_KR4 4 +#define __LINK_MODE_LANES_KR8 8 +#define __LINK_MODE_LANES_SR 1 +#define __LINK_MODE_LANES_SR2 2 +#define __LINK_MODE_LANES_SR4 4 +#define __LINK_MODE_LANES_SR8 8 +#define __LINK_MODE_LANES_ER 1 +#define __LINK_MODE_LANES_KX 1 +#define __LINK_MODE_LANES_KX4 4 +#define __LINK_MODE_LANES_LR 1 +#define __LINK_MODE_LANES_LR4 4 +#define __LINK_MODE_LANES_LR4_ER4 4 +#define __LINK_MODE_LANES_LR_ER_FR 1 +#define __LINK_MODE_LANES_LR2_ER2_FR2 2 +#define __LINK_MODE_LANES_LR4_ER4_FR4 4 +#define __LINK_MODE_LANES_LR8_ER8_FR8 8 +#define __LINK_MODE_LANES_LRM 1 +#define __LINK_MODE_LANES_MLD2 2 +#define __LINK_MODE_LANES_T 1 +#define __LINK_MODE_LANES_T1 1 +#define __LINK_MODE_LANES_X 1 +#define __LINK_MODE_LANES_FX 1 +#define __LINK_MODE_LANES_T1L 1 +#define __LINK_MODE_LANES_T1S 1 +#define __LINK_MODE_LANES_T1S_P2MP 1 +#define __LINK_MODE_LANES_VR 1 +#define __LINK_MODE_LANES_VR2 2 +#define __LINK_MODE_LANES_VR4 4 +#define __LINK_MODE_LANES_VR8 8 +#define __LINK_MODE_LANES_DR8_2 8 +#define __LINK_MODE_LANES_T1BRR 1 + +#define __DEFINE_LINK_MODE_PARAMS(_speed, _type, _duplex) \ + [ETHTOOL_LINK_MODE(_speed, _type, _duplex)] = { \ + .speed = SPEED_ ## _speed, \ + .lanes = __LINK_MODE_LANES_ ## _type, \ + .duplex = __DUPLEX_ ## _duplex \ + } +#define __DUPLEX_Half DUPLEX_HALF +#define __DUPLEX_Full DUPLEX_FULL +#define __DEFINE_SPECIAL_MODE_PARAMS(_mode) \ + [ETHTOOL_LINK_MODE_ ## _mode ## _BIT] = { \ + .speed = SPEED_UNKNOWN, \ + .lanes = 0, \ + .duplex = DUPLEX_UNKNOWN, \ + } + +const struct link_mode_info link_mode_params[] = { + __DEFINE_LINK_MODE_PARAMS(10, T, Half), + __DEFINE_LINK_MODE_PARAMS(10, T, Full), + __DEFINE_LINK_MODE_PARAMS(100, T, Half), + __DEFINE_LINK_MODE_PARAMS(100, T, Full), + __DEFINE_LINK_MODE_PARAMS(1000, T, Half), + __DEFINE_LINK_MODE_PARAMS(1000, T, Full), + __DEFINE_SPECIAL_MODE_PARAMS(Autoneg), + __DEFINE_SPECIAL_MODE_PARAMS(TP), + __DEFINE_SPECIAL_MODE_PARAMS(AUI), + __DEFINE_SPECIAL_MODE_PARAMS(MII), + __DEFINE_SPECIAL_MODE_PARAMS(FIBRE), + __DEFINE_SPECIAL_MODE_PARAMS(BNC), + __DEFINE_LINK_MODE_PARAMS(10000, T, Full), + __DEFINE_SPECIAL_MODE_PARAMS(Pause), + __DEFINE_SPECIAL_MODE_PARAMS(Asym_Pause), + __DEFINE_LINK_MODE_PARAMS(2500, X, Full), + __DEFINE_SPECIAL_MODE_PARAMS(Backplane), + __DEFINE_LINK_MODE_PARAMS(1000, KX, Full), + __DEFINE_LINK_MODE_PARAMS(10000, KX4, Full), + 
__DEFINE_LINK_MODE_PARAMS(10000, KR, Full), + [ETHTOOL_LINK_MODE_10000baseR_FEC_BIT] = { + .speed = SPEED_10000, + .lanes = 1, + .duplex = DUPLEX_FULL, + }, + __DEFINE_LINK_MODE_PARAMS(20000, MLD2, Full), + __DEFINE_LINK_MODE_PARAMS(20000, KR2, Full), + __DEFINE_LINK_MODE_PARAMS(40000, KR4, Full), + __DEFINE_LINK_MODE_PARAMS(40000, CR4, Full), + __DEFINE_LINK_MODE_PARAMS(40000, SR4, Full), + __DEFINE_LINK_MODE_PARAMS(40000, LR4, Full), + __DEFINE_LINK_MODE_PARAMS(56000, KR4, Full), + __DEFINE_LINK_MODE_PARAMS(56000, CR4, Full), + __DEFINE_LINK_MODE_PARAMS(56000, SR4, Full), + __DEFINE_LINK_MODE_PARAMS(56000, LR4, Full), + __DEFINE_LINK_MODE_PARAMS(25000, CR, Full), + __DEFINE_LINK_MODE_PARAMS(25000, KR, Full), + __DEFINE_LINK_MODE_PARAMS(25000, SR, Full), + __DEFINE_LINK_MODE_PARAMS(50000, CR2, Full), + __DEFINE_LINK_MODE_PARAMS(50000, KR2, Full), + __DEFINE_LINK_MODE_PARAMS(100000, KR4, Full), + __DEFINE_LINK_MODE_PARAMS(100000, SR4, Full), + __DEFINE_LINK_MODE_PARAMS(100000, CR4, Full), + __DEFINE_LINK_MODE_PARAMS(100000, LR4_ER4, Full), + __DEFINE_LINK_MODE_PARAMS(50000, SR2, Full), + __DEFINE_LINK_MODE_PARAMS(1000, X, Full), + __DEFINE_LINK_MODE_PARAMS(10000, CR, Full), + __DEFINE_LINK_MODE_PARAMS(10000, SR, Full), + __DEFINE_LINK_MODE_PARAMS(10000, LR, Full), + __DEFINE_LINK_MODE_PARAMS(10000, LRM, Full), + __DEFINE_LINK_MODE_PARAMS(10000, ER, Full), + __DEFINE_LINK_MODE_PARAMS(2500, T, Full), + __DEFINE_LINK_MODE_PARAMS(5000, T, Full), + __DEFINE_SPECIAL_MODE_PARAMS(FEC_NONE), + __DEFINE_SPECIAL_MODE_PARAMS(FEC_RS), + __DEFINE_SPECIAL_MODE_PARAMS(FEC_BASER), + __DEFINE_LINK_MODE_PARAMS(50000, KR, Full), + __DEFINE_LINK_MODE_PARAMS(50000, SR, Full), + __DEFINE_LINK_MODE_PARAMS(50000, CR, Full), + __DEFINE_LINK_MODE_PARAMS(50000, LR_ER_FR, Full), + __DEFINE_LINK_MODE_PARAMS(50000, DR, Full), + __DEFINE_LINK_MODE_PARAMS(100000, KR2, Full), + __DEFINE_LINK_MODE_PARAMS(100000, SR2, Full), + __DEFINE_LINK_MODE_PARAMS(100000, CR2, Full), + __DEFINE_LINK_MODE_PARAMS(100000, LR2_ER2_FR2, Full), + __DEFINE_LINK_MODE_PARAMS(100000, DR2, Full), + __DEFINE_LINK_MODE_PARAMS(200000, KR4, Full), + __DEFINE_LINK_MODE_PARAMS(200000, SR4, Full), + __DEFINE_LINK_MODE_PARAMS(200000, LR4_ER4_FR4, Full), + __DEFINE_LINK_MODE_PARAMS(200000, DR4, Full), + __DEFINE_LINK_MODE_PARAMS(200000, CR4, Full), + __DEFINE_LINK_MODE_PARAMS(100, T1, Full), + __DEFINE_LINK_MODE_PARAMS(1000, T1, Full), + __DEFINE_LINK_MODE_PARAMS(400000, KR8, Full), + __DEFINE_LINK_MODE_PARAMS(400000, SR8, Full), + __DEFINE_LINK_MODE_PARAMS(400000, LR8_ER8_FR8, Full), + __DEFINE_LINK_MODE_PARAMS(400000, DR8, Full), + __DEFINE_LINK_MODE_PARAMS(400000, CR8, Full), + __DEFINE_SPECIAL_MODE_PARAMS(FEC_LLRS), + __DEFINE_LINK_MODE_PARAMS(100000, KR, Full), + __DEFINE_LINK_MODE_PARAMS(100000, SR, Full), + __DEFINE_LINK_MODE_PARAMS(100000, LR_ER_FR, Full), + __DEFINE_LINK_MODE_PARAMS(100000, DR, Full), + __DEFINE_LINK_MODE_PARAMS(100000, CR, Full), + __DEFINE_LINK_MODE_PARAMS(200000, KR2, Full), + __DEFINE_LINK_MODE_PARAMS(200000, SR2, Full), + __DEFINE_LINK_MODE_PARAMS(200000, LR2_ER2_FR2, Full), + __DEFINE_LINK_MODE_PARAMS(200000, DR2, Full), + __DEFINE_LINK_MODE_PARAMS(200000, CR2, Full), + __DEFINE_LINK_MODE_PARAMS(400000, KR4, Full), + __DEFINE_LINK_MODE_PARAMS(400000, SR4, Full), + __DEFINE_LINK_MODE_PARAMS(400000, LR4_ER4_FR4, Full), + __DEFINE_LINK_MODE_PARAMS(400000, DR4, Full), + __DEFINE_LINK_MODE_PARAMS(400000, CR4, Full), + __DEFINE_LINK_MODE_PARAMS(100, FX, Half), + __DEFINE_LINK_MODE_PARAMS(100, FX, Full), + 
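/* These parameters are what ethtool_params_from_link_mode(), exported later
 * in this file, copies into struct ethtool_link_ksettings when a driver
 * reports a single negotiated link mode.  A minimal usage sketch:
 *
 *	struct ethtool_link_ksettings ks = {};
 *
 *	ethtool_params_from_link_mode(&ks,
 *			ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT);
 *	// now ks.base.speed == SPEED_100000, ks.lanes == 4 and
 *	// ks.base.duplex == DUPLEX_FULL, taken straight from this table
 */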
__DEFINE_LINK_MODE_PARAMS(10, T1L, Full), + __DEFINE_LINK_MODE_PARAMS(800000, CR8, Full), + __DEFINE_LINK_MODE_PARAMS(800000, KR8, Full), + __DEFINE_LINK_MODE_PARAMS(800000, DR8, Full), + __DEFINE_LINK_MODE_PARAMS(800000, DR8_2, Full), + __DEFINE_LINK_MODE_PARAMS(800000, SR8, Full), + __DEFINE_LINK_MODE_PARAMS(800000, VR8, Full), + __DEFINE_LINK_MODE_PARAMS(10, T1S, Full), + __DEFINE_LINK_MODE_PARAMS(10, T1S, Half), + __DEFINE_LINK_MODE_PARAMS(10, T1S_P2MP, Half), + __DEFINE_LINK_MODE_PARAMS(10, T1BRR, Full), + __DEFINE_LINK_MODE_PARAMS(200000, CR, Full), + __DEFINE_LINK_MODE_PARAMS(200000, KR, Full), + __DEFINE_LINK_MODE_PARAMS(200000, DR, Full), + __DEFINE_LINK_MODE_PARAMS(200000, DR_2, Full), + __DEFINE_LINK_MODE_PARAMS(200000, SR, Full), + __DEFINE_LINK_MODE_PARAMS(200000, VR, Full), + __DEFINE_LINK_MODE_PARAMS(400000, CR2, Full), + __DEFINE_LINK_MODE_PARAMS(400000, KR2, Full), + __DEFINE_LINK_MODE_PARAMS(400000, DR2, Full), + __DEFINE_LINK_MODE_PARAMS(400000, DR2_2, Full), + __DEFINE_LINK_MODE_PARAMS(400000, SR2, Full), + __DEFINE_LINK_MODE_PARAMS(400000, VR2, Full), + __DEFINE_LINK_MODE_PARAMS(800000, CR4, Full), + __DEFINE_LINK_MODE_PARAMS(800000, KR4, Full), + __DEFINE_LINK_MODE_PARAMS(800000, DR4, Full), + __DEFINE_LINK_MODE_PARAMS(800000, DR4_2, Full), + __DEFINE_LINK_MODE_PARAMS(800000, SR4, Full), + __DEFINE_LINK_MODE_PARAMS(800000, VR4, Full), + __DEFINE_LINK_MODE_PARAMS(1600000, CR8, Full), + __DEFINE_LINK_MODE_PARAMS(1600000, KR8, Full), + __DEFINE_LINK_MODE_PARAMS(1600000, DR8, Full), + __DEFINE_LINK_MODE_PARAMS(1600000, DR8_2, Full), +}; +static_assert(ARRAY_SIZE(link_mode_params) == __ETHTOOL_LINK_MODE_MASK_NBITS); +EXPORT_SYMBOL_GPL(link_mode_params); + +const char netif_msg_class_names[][ETH_GSTRING_LEN] = { + [NETIF_MSG_DRV_BIT] = "drv", + [NETIF_MSG_PROBE_BIT] = "probe", + [NETIF_MSG_LINK_BIT] = "link", + [NETIF_MSG_TIMER_BIT] = "timer", + [NETIF_MSG_IFDOWN_BIT] = "ifdown", + [NETIF_MSG_IFUP_BIT] = "ifup", + [NETIF_MSG_RX_ERR_BIT] = "rx_err", + [NETIF_MSG_TX_ERR_BIT] = "tx_err", + [NETIF_MSG_TX_QUEUED_BIT] = "tx_queued", + [NETIF_MSG_INTR_BIT] = "intr", + [NETIF_MSG_TX_DONE_BIT] = "tx_done", + [NETIF_MSG_RX_STATUS_BIT] = "rx_status", + [NETIF_MSG_PKTDATA_BIT] = "pktdata", + [NETIF_MSG_HW_BIT] = "hw", + [NETIF_MSG_WOL_BIT] = "wol", +}; +static_assert(ARRAY_SIZE(netif_msg_class_names) == NETIF_MSG_CLASS_COUNT); + +const char wol_mode_names[][ETH_GSTRING_LEN] = { + [const_ilog2(WAKE_PHY)] = "phy", + [const_ilog2(WAKE_UCAST)] = "ucast", + [const_ilog2(WAKE_MCAST)] = "mcast", + [const_ilog2(WAKE_BCAST)] = "bcast", + [const_ilog2(WAKE_ARP)] = "arp", + [const_ilog2(WAKE_MAGIC)] = "magic", + [const_ilog2(WAKE_MAGICSECURE)] = "magicsecure", + [const_ilog2(WAKE_FILTER)] = "filter", +}; +static_assert(ARRAY_SIZE(wol_mode_names) == WOL_MODE_COUNT); + +const char sof_timestamping_names[][ETH_GSTRING_LEN] = { + [const_ilog2(SOF_TIMESTAMPING_TX_HARDWARE)] = "hardware-transmit", + [const_ilog2(SOF_TIMESTAMPING_TX_SOFTWARE)] = "software-transmit", + [const_ilog2(SOF_TIMESTAMPING_RX_HARDWARE)] = "hardware-receive", + [const_ilog2(SOF_TIMESTAMPING_RX_SOFTWARE)] = "software-receive", + [const_ilog2(SOF_TIMESTAMPING_SOFTWARE)] = "software-system-clock", + [const_ilog2(SOF_TIMESTAMPING_SYS_HARDWARE)] = "hardware-legacy-clock", + [const_ilog2(SOF_TIMESTAMPING_RAW_HARDWARE)] = "hardware-raw-clock", + [const_ilog2(SOF_TIMESTAMPING_OPT_ID)] = "option-id", + [const_ilog2(SOF_TIMESTAMPING_TX_SCHED)] = "sched-transmit", + [const_ilog2(SOF_TIMESTAMPING_TX_ACK)] = "ack-transmit", + 
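/* Indexing convention shared by wol_mode_names[] above and this array:
 * the WAKE_* and SOF_TIMESTAMPING_* flags are single-bit masks, so
 * const_ilog2() maps each mask to its bit position and the string ends up
 * at that bit's index.  For example, assuming WAKE_MAGIC is bit 5 in the
 * UAPI header (not shown here),
 *
 *	wol_mode_names[const_ilog2(WAKE_MAGIC)] == wol_mode_names[5] == "magic"
 *
 * The __SOF_TIMESTAMPING_CNT bound asserted after this array is derived
 * the same way, as const_ilog2(SOF_TIMESTAMPING_LAST) + 1 (see common.h).
 */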
[const_ilog2(SOF_TIMESTAMPING_OPT_CMSG)] = "option-cmsg", + [const_ilog2(SOF_TIMESTAMPING_OPT_TSONLY)] = "option-tsonly", + [const_ilog2(SOF_TIMESTAMPING_OPT_STATS)] = "option-stats", + [const_ilog2(SOF_TIMESTAMPING_OPT_PKTINFO)] = "option-pktinfo", + [const_ilog2(SOF_TIMESTAMPING_OPT_TX_SWHW)] = "option-tx-swhw", + [const_ilog2(SOF_TIMESTAMPING_BIND_PHC)] = "bind-phc", + [const_ilog2(SOF_TIMESTAMPING_OPT_ID_TCP)] = "option-id-tcp", + [const_ilog2(SOF_TIMESTAMPING_OPT_RX_FILTER)] = "option-rx-filter", + [const_ilog2(SOF_TIMESTAMPING_TX_COMPLETION)] = "tx-completion", +}; +static_assert(ARRAY_SIZE(sof_timestamping_names) == __SOF_TIMESTAMPING_CNT); + +const char ts_tx_type_names[][ETH_GSTRING_LEN] = { + [HWTSTAMP_TX_OFF] = "off", + [HWTSTAMP_TX_ON] = "on", + [HWTSTAMP_TX_ONESTEP_SYNC] = "onestep-sync", + [HWTSTAMP_TX_ONESTEP_P2P] = "onestep-p2p", +}; +static_assert(ARRAY_SIZE(ts_tx_type_names) == __HWTSTAMP_TX_CNT); + +const char ts_rx_filter_names[][ETH_GSTRING_LEN] = { + [HWTSTAMP_FILTER_NONE] = "none", + [HWTSTAMP_FILTER_ALL] = "all", + [HWTSTAMP_FILTER_SOME] = "some", + [HWTSTAMP_FILTER_PTP_V1_L4_EVENT] = "ptpv1-l4-event", + [HWTSTAMP_FILTER_PTP_V1_L4_SYNC] = "ptpv1-l4-sync", + [HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ] = "ptpv1-l4-delay-req", + [HWTSTAMP_FILTER_PTP_V2_L4_EVENT] = "ptpv2-l4-event", + [HWTSTAMP_FILTER_PTP_V2_L4_SYNC] = "ptpv2-l4-sync", + [HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ] = "ptpv2-l4-delay-req", + [HWTSTAMP_FILTER_PTP_V2_L2_EVENT] = "ptpv2-l2-event", + [HWTSTAMP_FILTER_PTP_V2_L2_SYNC] = "ptpv2-l2-sync", + [HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ] = "ptpv2-l2-delay-req", + [HWTSTAMP_FILTER_PTP_V2_EVENT] = "ptpv2-event", + [HWTSTAMP_FILTER_PTP_V2_SYNC] = "ptpv2-sync", + [HWTSTAMP_FILTER_PTP_V2_DELAY_REQ] = "ptpv2-delay-req", + [HWTSTAMP_FILTER_NTP_ALL] = "ntp-all", +}; +static_assert(ARRAY_SIZE(ts_rx_filter_names) == __HWTSTAMP_FILTER_CNT); + +const char ts_flags_names[][ETH_GSTRING_LEN] = { + [const_ilog2(HWTSTAMP_FLAG_BONDED_PHC_INDEX)] = "bonded-phc-index", +}; +static_assert(ARRAY_SIZE(ts_flags_names) == __HWTSTAMP_FLAG_CNT); + +const char udp_tunnel_type_names[][ETH_GSTRING_LEN] = { + [ETHTOOL_UDP_TUNNEL_TYPE_VXLAN] = "vxlan", + [ETHTOOL_UDP_TUNNEL_TYPE_GENEVE] = "geneve", + [ETHTOOL_UDP_TUNNEL_TYPE_VXLAN_GPE] = "vxlan-gpe", +}; +static_assert(ARRAY_SIZE(udp_tunnel_type_names) == + __ETHTOOL_UDP_TUNNEL_TYPE_CNT); + +/* return false if legacy contained non-0 deprecated fields + * maxtxpkt/maxrxpkt. 
rest of ksettings always updated + */ +bool +convert_legacy_settings_to_link_ksettings( + struct ethtool_link_ksettings *link_ksettings, + const struct ethtool_cmd *legacy_settings) +{ + bool retval = true; + + memset(link_ksettings, 0, sizeof(*link_ksettings)); + + /* This is used to tell users that driver is still using these + * deprecated legacy fields, and they should not use + * %ETHTOOL_GLINKSETTINGS/%ETHTOOL_SLINKSETTINGS + */ + if (legacy_settings->maxtxpkt || + legacy_settings->maxrxpkt) + retval = false; + + ethtool_convert_legacy_u32_to_link_mode( + link_ksettings->link_modes.supported, + legacy_settings->supported); + ethtool_convert_legacy_u32_to_link_mode( + link_ksettings->link_modes.advertising, + legacy_settings->advertising); + ethtool_convert_legacy_u32_to_link_mode( + link_ksettings->link_modes.lp_advertising, + legacy_settings->lp_advertising); + link_ksettings->base.speed + = ethtool_cmd_speed(legacy_settings); + link_ksettings->base.duplex + = legacy_settings->duplex; + link_ksettings->base.port + = legacy_settings->port; + link_ksettings->base.phy_address + = legacy_settings->phy_address; + link_ksettings->base.autoneg + = legacy_settings->autoneg; + link_ksettings->base.mdio_support + = legacy_settings->mdio_support; + link_ksettings->base.eth_tp_mdix + = legacy_settings->eth_tp_mdix; + link_ksettings->base.eth_tp_mdix_ctrl + = legacy_settings->eth_tp_mdix_ctrl; + return retval; +} + +int __ethtool_get_link(struct net_device *dev) +{ + if (!dev->ethtool_ops->get_link) + return -EOPNOTSUPP; + + return netif_running(dev) && dev->ethtool_ops->get_link(dev); +} + +int ethtool_get_rx_ring_count(struct net_device *dev) +{ + const struct ethtool_ops *ops = dev->ethtool_ops; + struct ethtool_rxnfc rx_rings = {}; + int ret; + + if (ops->get_rx_ring_count) + return ops->get_rx_ring_count(dev); + + if (!ops->get_rxnfc) + return -EOPNOTSUPP; + + rx_rings.cmd = ETHTOOL_GRXRINGS; + ret = ops->get_rxnfc(dev, &rx_rings, NULL); + if (ret < 0) + return ret; + + return rx_rings.data; +} + +static int ethtool_get_rxnfc_rule_count(struct net_device *dev) +{ + const struct ethtool_ops *ops = dev->ethtool_ops; + struct ethtool_rxnfc info = { + .cmd = ETHTOOL_GRXCLSRLCNT, + }; + int err; + + err = ops->get_rxnfc(dev, &info, NULL); + if (err) + return err; + + return info.rule_cnt; +} + +/* Max offset for one RSS context */ +static u32 ethtool_get_rss_ctx_max_channel(struct ethtool_rxfh_context *ctx) +{ + u32 max_ring = 0; + u32 i, *tbl; + + if (WARN_ON_ONCE(!ctx)) + return 0; + tbl = ethtool_rxfh_context_indir(ctx); + for (i = 0; i < ctx->indir_size; i++) + max_ring = max(max_ring, tbl[i]); + return max_ring; +} + +static int ethtool_get_max_rxnfc_channel(struct net_device *dev, u64 *max) +{ + const struct ethtool_ops *ops = dev->ethtool_ops; + struct ethtool_rxnfc *info; + int err, i, rule_cnt; + u64 max_ring = 0; + + if (!ops->get_rxnfc) + return -EOPNOTSUPP; + + rule_cnt = ethtool_get_rxnfc_rule_count(dev); + if (rule_cnt <= 0) + return -EINVAL; + + info = kvzalloc(struct_size(info, rule_locs, rule_cnt), GFP_KERNEL); + if (!info) + return -ENOMEM; + + info->cmd = ETHTOOL_GRXCLSRLALL; + info->rule_cnt = rule_cnt; + err = ops->get_rxnfc(dev, info, info->rule_locs); + if (err) + goto err_free_info; + + for (i = 0; i < rule_cnt; i++) { + struct ethtool_rxnfc rule_info = { + .cmd = ETHTOOL_GRXCLSRULE, + .fs.location = info->rule_locs[i], + }; + + err = ops->get_rxnfc(dev, &rule_info, NULL); + if (err) + goto err_free_info; + + if (rule_info.fs.ring_cookie != RX_CLS_FLOW_DISC && + 
rule_info.fs.ring_cookie != RX_CLS_FLOW_WAKE && + !ethtool_get_flow_spec_ring_vf(rule_info.fs.ring_cookie)) { + u64 ring = rule_info.fs.ring_cookie; + + if (rule_info.flow_type & FLOW_RSS) { + struct ethtool_rxfh_context *ctx; + + ctx = xa_load(&dev->ethtool->rss_ctx, + rule_info.rss_context); + ring += ethtool_get_rss_ctx_max_channel(ctx); + } + max_ring = max_t(u64, max_ring, ring); + } + } + + kvfree(info); + *max = max_ring; + return 0; + +err_free_info: + kvfree(info); + return err; +} + +/* Max offset across all of a device's RSS contexts */ +static u32 ethtool_get_max_rss_ctx_channel(struct net_device *dev) +{ + struct ethtool_rxfh_context *ctx; + unsigned long context; + u32 max_ring = 0; + + mutex_lock(&dev->ethtool->rss_lock); + xa_for_each(&dev->ethtool->rss_ctx, context, ctx) + max_ring = max(max_ring, ethtool_get_rss_ctx_max_channel(ctx)); + mutex_unlock(&dev->ethtool->rss_lock); + + return max_ring; +} + +static u32 ethtool_get_max_rxfh_channel(struct net_device *dev) +{ + struct ethtool_rxfh_param rxfh = {}; + u32 dev_size, current_max = 0; + int ret; + + /* While we do track whether RSS context has an indirection + * table explicitly set by the user, no driver looks at that bit. + * Assume drivers won't auto-regenerate the additional tables, + * to be safe. + */ + current_max = ethtool_get_max_rss_ctx_channel(dev); + + if (!netif_is_rxfh_configured(dev)) + return current_max; + + if (!dev->ethtool_ops->get_rxfh_indir_size || + !dev->ethtool_ops->get_rxfh) + return current_max; + dev_size = dev->ethtool_ops->get_rxfh_indir_size(dev); + if (dev_size == 0) + return current_max; + + rxfh.indir = kcalloc(dev_size, sizeof(rxfh.indir[0]), GFP_USER); + if (!rxfh.indir) + return U32_MAX; + + mutex_lock(&dev->ethtool->rss_lock); + ret = dev->ethtool_ops->get_rxfh(dev, &rxfh); + mutex_unlock(&dev->ethtool->rss_lock); + if (ret) { + current_max = U32_MAX; + goto out_free; + } + + while (dev_size--) + current_max = max(current_max, rxfh.indir[dev_size]); + +out_free: + kfree(rxfh.indir); + return current_max; +} + +int ethtool_check_max_channel(struct net_device *dev, + struct ethtool_channels channels, + struct genl_info *info) +{ + u64 max_rxnfc_in_use; + u32 max_rxfh_in_use; + int max_mp_in_use; + + /* ensure the new Rx count fits within the configured Rx flow + * indirection table/rxnfc settings + */ + if (ethtool_get_max_rxnfc_channel(dev, &max_rxnfc_in_use)) + max_rxnfc_in_use = 0; + max_rxfh_in_use = ethtool_get_max_rxfh_channel(dev); + if (channels.combined_count + channels.rx_count <= max_rxfh_in_use) { + if (info) + GENL_SET_ERR_MSG_FMT(info, "requested channel counts are too low for existing indirection table (%d)", max_rxfh_in_use); + return -EINVAL; + } + if (channels.combined_count + channels.rx_count <= max_rxnfc_in_use) { + if (info) + GENL_SET_ERR_MSG(info, "requested channel counts are too low for existing ntuple filter settings"); + return -EINVAL; + } + + max_mp_in_use = dev_get_min_mp_channel_count(dev); + if (channels.combined_count + channels.rx_count <= max_mp_in_use) { + if (info) + GENL_SET_ERR_MSG_FMT(info, "requested channel counts are too low for existing memory provider setting (%d)", max_mp_in_use); + return -EINVAL; + } + + return 0; +} + +int ethtool_check_rss_ctx_busy(struct net_device *dev, u32 rss_context) +{ + const struct ethtool_ops *ops = dev->ethtool_ops; + struct ethtool_rxnfc *info; + int rc, i, rule_cnt; + + if (!ops->get_rxnfc) + return 0; + + rule_cnt = ethtool_get_rxnfc_rule_count(dev); + if (!rule_cnt) + return 0; + + if (rule_cnt < 0) + 
return -EINVAL; + + info = kvzalloc(struct_size(info, rule_locs, rule_cnt), GFP_KERNEL); + if (!info) + return -ENOMEM; + + info->cmd = ETHTOOL_GRXCLSRLALL; + info->rule_cnt = rule_cnt; + rc = ops->get_rxnfc(dev, info, info->rule_locs); + if (rc) + goto out_free; + + for (i = 0; i < rule_cnt; i++) { + struct ethtool_rxnfc rule_info = { + .cmd = ETHTOOL_GRXCLSRULE, + .fs.location = info->rule_locs[i], + }; + + rc = ops->get_rxnfc(dev, &rule_info, NULL); + if (rc) + goto out_free; + + if (rule_info.fs.flow_type & FLOW_RSS && + rule_info.rss_context == rss_context) { + rc = -EBUSY; + goto out_free; + } + } + +out_free: + kvfree(info); + return rc; +} + +struct ethtool_rxfh_context * +ethtool_rxfh_ctx_alloc(const struct ethtool_ops *ops, + u32 indir_size, u32 key_size) +{ + size_t indir_bytes, flex_len, key_off, size; + struct ethtool_rxfh_context *ctx; + u32 priv_bytes, indir_max; + u16 key_max; + + key_max = max(key_size, ops->rxfh_key_space); + indir_max = max(indir_size, ops->rxfh_indir_space); + + priv_bytes = ALIGN(ops->rxfh_priv_size, sizeof(u32)); + indir_bytes = array_size(indir_max, sizeof(u32)); + + key_off = size_add(priv_bytes, indir_bytes); + flex_len = size_add(key_off, key_max); + size = struct_size_t(struct ethtool_rxfh_context, data, flex_len); + + ctx = kzalloc(size, GFP_KERNEL_ACCOUNT); + if (!ctx) + return NULL; + + ctx->indir_size = indir_size; + ctx->key_size = key_size; + ctx->key_off = key_off; + ctx->priv_size = ops->rxfh_priv_size; + + ctx->hfunc = ETH_RSS_HASH_NO_CHANGE; + ctx->input_xfrm = RXH_XFRM_NO_CHANGE; + + return ctx; +} + +/* Check if fields configured for flow hash are symmetric - if src is included + * so is dst and vice versa. + */ +int ethtool_rxfh_config_is_sym(u64 rxfh) +{ + bool sym; + + sym = rxfh == (rxfh & (RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3)); + sym &= !!(rxfh & RXH_IP_SRC) == !!(rxfh & RXH_IP_DST); + sym &= !!(rxfh & RXH_L4_B_0_1) == !!(rxfh & RXH_L4_B_2_3); + + return sym; +} + +int ethtool_check_ops(const struct ethtool_ops *ops) +{ + if (WARN_ON(ops->set_coalesce && !ops->supported_coalesce_params)) + return -EINVAL; + if (WARN_ON(ops->rxfh_max_num_contexts == 1)) + return -EINVAL; + if (WARN_ON(ops->supported_input_xfrm && !ops->get_rxfh_fields)) + return -EINVAL; + if (WARN_ON(ops->supported_input_xfrm && + ops->rxfh_per_ctx_fields != ops->rxfh_per_ctx_key)) + return -EINVAL; + + /* NOTE: sufficiently insane drivers may swap ethtool_ops at runtime, + * the fact that ops are checked at registration time does not + * mean the ops attached to a netdev later on are sane. 
+ */ + return 0; +} + +void ethtool_ringparam_get_cfg(struct net_device *dev, + struct ethtool_ringparam *param, + struct kernel_ethtool_ringparam *kparam, + struct netlink_ext_ack *extack) +{ + memset(param, 0, sizeof(*param)); + memset(kparam, 0, sizeof(*kparam)); + + param->cmd = ETHTOOL_GRINGPARAM; + dev->ethtool_ops->get_ringparam(dev, param, kparam, extack); + + /* Driver gives us current state, we want to return current config */ + kparam->tcp_data_split = dev->cfg->hds_config; + kparam->hds_thresh = dev->cfg->hds_thresh; +} + +static void ethtool_init_tsinfo(struct kernel_ethtool_ts_info *info) +{ + memset(info, 0, sizeof(*info)); + info->cmd = ETHTOOL_GET_TS_INFO; + info->phc_index = -1; +} + +int ethtool_net_get_ts_info_by_phc(struct net_device *dev, + struct kernel_ethtool_ts_info *info, + struct hwtstamp_provider_desc *hwprov_desc) +{ + const struct ethtool_ops *ops = dev->ethtool_ops; + int err; + + if (!ops->get_ts_info) + return -EOPNOTSUPP; + + /* Does ptp comes from netdev */ + ethtool_init_tsinfo(info); + info->phc_qualifier = hwprov_desc->qualifier; + err = ops->get_ts_info(dev, info); + if (err) + return err; + + if (info->phc_index == hwprov_desc->index && + net_support_hwtstamp_qualifier(dev, hwprov_desc->qualifier)) + return 0; + + return -ENODEV; +} + +struct phy_device * +ethtool_phy_get_ts_info_by_phc(struct net_device *dev, + struct kernel_ethtool_ts_info *info, + struct hwtstamp_provider_desc *hwprov_desc) +{ + int err; + + /* Only precise qualifier is supported in phydev */ + if (hwprov_desc->qualifier != HWTSTAMP_PROVIDER_QUALIFIER_PRECISE) + return ERR_PTR(-ENODEV); + + /* Look in the phy topology */ + if (dev->link_topo) { + struct phy_device_node *pdn; + unsigned long phy_index; + + xa_for_each(&dev->link_topo->phys, phy_index, pdn) { + if (!phy_has_tsinfo(pdn->phy)) + continue; + + ethtool_init_tsinfo(info); + err = phy_ts_info(pdn->phy, info); + if (err) + return ERR_PTR(err); + + if (info->phc_index == hwprov_desc->index) + return pdn->phy; + } + return ERR_PTR(-ENODEV); + } + + /* Look on the dev->phydev */ + if (phy_has_tsinfo(dev->phydev)) { + ethtool_init_tsinfo(info); + err = phy_ts_info(dev->phydev, info); + if (err) + return ERR_PTR(err); + + if (info->phc_index == hwprov_desc->index) + return dev->phydev; + } + + return ERR_PTR(-ENODEV); +} + +int ethtool_get_ts_info_by_phc(struct net_device *dev, + struct kernel_ethtool_ts_info *info, + struct hwtstamp_provider_desc *hwprov_desc) +{ + int err; + + err = ethtool_net_get_ts_info_by_phc(dev, info, hwprov_desc); + if (err == -ENODEV || err == -EOPNOTSUPP) { + struct phy_device *phy; + + phy = ethtool_phy_get_ts_info_by_phc(dev, info, hwprov_desc); + if (IS_ERR(phy)) + return PTR_ERR(phy); + + /* Report the phc source only if we have a real + * phc source with an index. 
+ */ + if (info->phc_index >= 0) { + info->phc_source = HWTSTAMP_SOURCE_PHYLIB; + info->phc_phyindex = phy->phyindex; + } + err = 0; + } else if (!err && info->phc_index >= 0) { + info->phc_source = HWTSTAMP_SOURCE_NETDEV; + } + + info->so_timestamping |= SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE; + + return err; +} + +int __ethtool_get_ts_info(struct net_device *dev, + struct kernel_ethtool_ts_info *info) +{ + struct hwtstamp_provider *hwprov; + int err = 0; + + rcu_read_lock(); + hwprov = rcu_dereference(dev->hwprov); + /* No provider specified, use default behavior */ + if (!hwprov) { + const struct ethtool_ops *ops = dev->ethtool_ops; + struct phy_device *phydev = dev->phydev; + + ethtool_init_tsinfo(info); + if (phy_is_default_hwtstamp(phydev) && + phy_has_tsinfo(phydev)) { + err = phy_ts_info(phydev, info); + /* Report the phc source only if we have a real + * phc source with an index. + */ + if (!err && info->phc_index >= 0) { + info->phc_source = HWTSTAMP_SOURCE_PHYLIB; + info->phc_phyindex = phydev->phyindex; + } + } else if (ops->get_ts_info) { + err = ops->get_ts_info(dev, info); + if (!err && info->phc_index >= 0) + info->phc_source = HWTSTAMP_SOURCE_NETDEV; + } + + info->so_timestamping |= SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE; + + rcu_read_unlock(); + return err; + } + + err = ethtool_get_ts_info_by_phc(dev, info, &hwprov->desc); + rcu_read_unlock(); + return err; +} + +bool net_support_hwtstamp_qualifier(struct net_device *dev, + enum hwtstamp_provider_qualifier qualifier) +{ + const struct ethtool_ops *ops = dev->ethtool_ops; + + if (!ops) + return false; + + /* Return true with precise qualifier and with NIC without + * qualifier description to not break the old behavior. + */ + if (!ops->supported_hwtstamp_qualifiers && + qualifier == HWTSTAMP_PROVIDER_QUALIFIER_PRECISE) + return true; + + if (ops->supported_hwtstamp_qualifiers & BIT(qualifier)) + return true; + + return false; +} + +int ethtool_get_phc_vclocks(struct net_device *dev, int **vclock_index) +{ + struct kernel_ethtool_ts_info info = { }; + int num = 0; + + if (!__ethtool_get_ts_info(dev, &info)) + num = ptp_get_vclocks_index(info.phc_index, vclock_index); + + return num; +} +EXPORT_SYMBOL(ethtool_get_phc_vclocks); + +int ethtool_get_ts_info_by_layer(struct net_device *dev, struct kernel_ethtool_ts_info *info) +{ + return __ethtool_get_ts_info(dev, info); +} +EXPORT_SYMBOL(ethtool_get_ts_info_by_layer); + +const struct ethtool_phy_ops *ethtool_phy_ops; + +void ethtool_set_ethtool_phy_ops(const struct ethtool_phy_ops *ops) +{ + ASSERT_RTNL(); + ethtool_phy_ops = ops; +} +EXPORT_SYMBOL_GPL(ethtool_set_ethtool_phy_ops); + +void +ethtool_params_from_link_mode(struct ethtool_link_ksettings *link_ksettings, + enum ethtool_link_mode_bit_indices link_mode) +{ + const struct link_mode_info *link_info; + + if (WARN_ON_ONCE(link_mode >= __ETHTOOL_LINK_MODE_MASK_NBITS)) + return; + + link_info = &link_mode_params[link_mode]; + link_ksettings->base.speed = link_info->speed; + link_ksettings->lanes = link_info->lanes; + link_ksettings->base.duplex = link_info->duplex; +} +EXPORT_SYMBOL_GPL(ethtool_params_from_link_mode); + +/** + * ethtool_forced_speed_maps_init + * @maps: Pointer to an array of Ethtool forced speed map + * @size: Array size + * + * Initialize an array of Ethtool forced speed map to Ethtool link modes. This + * should be called during driver module init. 
+ */ +void +ethtool_forced_speed_maps_init(struct ethtool_forced_speed_map *maps, u32 size) +{ + for (u32 i = 0; i < size; i++) { + struct ethtool_forced_speed_map *map = &maps[i]; + + linkmode_set_bit_array(map->cap_arr, map->arr_size, map->caps); + map->cap_arr = NULL; + map->arr_size = 0; + } +} +EXPORT_SYMBOL_GPL(ethtool_forced_speed_maps_init); + +void ethtool_rxfh_context_lost(struct net_device *dev, u32 context_id) +{ + struct ethtool_rxfh_context *ctx; + + WARN_ONCE(!rtnl_is_locked() && + !lockdep_is_held_type(&dev->ethtool->rss_lock, -1), + "RSS context lock assertion failed\n"); + + netdev_err(dev, "device error, RSS context %d lost\n", context_id); + ctx = xa_erase(&dev->ethtool->rss_ctx, context_id); + kfree(ctx); + ethtool_rss_notify(dev, ETHTOOL_MSG_RSS_DELETE_NTF, context_id); +} +EXPORT_SYMBOL(ethtool_rxfh_context_lost); diff --git a/net/ethtool/common.h b/net/ethtool/common.h new file mode 100644 index 000000000000..1609cf4e53eb --- /dev/null +++ b/net/ethtool/common.h @@ -0,0 +1,92 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef _ETHTOOL_COMMON_H +#define _ETHTOOL_COMMON_H + +#include <linux/netdevice.h> +#include <linux/ethtool.h> + +#define ETHTOOL_DEV_FEATURE_WORDS DIV_ROUND_UP(NETDEV_FEATURE_COUNT, 32) + +/* compose link mode index from speed, type and duplex */ +#define ETHTOOL_LINK_MODE(speed, type, duplex) \ + ETHTOOL_LINK_MODE_ ## speed ## base ## type ## _ ## duplex ## _BIT + +#define __SOF_TIMESTAMPING_CNT (const_ilog2(SOF_TIMESTAMPING_LAST) + 1) +#define __HWTSTAMP_FLAG_CNT (const_ilog2(HWTSTAMP_FLAG_LAST) + 1) + +struct genl_info; +struct hwtstamp_provider_desc; + +extern const char +netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN]; +extern const char +rss_hash_func_strings[ETH_RSS_HASH_FUNCS_COUNT][ETH_GSTRING_LEN]; +extern const char +tunable_strings[__ETHTOOL_TUNABLE_COUNT][ETH_GSTRING_LEN]; +extern const char +phy_tunable_strings[__ETHTOOL_PHY_TUNABLE_COUNT][ETH_GSTRING_LEN]; +extern const char link_mode_names[][ETH_GSTRING_LEN]; +extern const char netif_msg_class_names[][ETH_GSTRING_LEN]; +extern const char wol_mode_names[][ETH_GSTRING_LEN]; +extern const char sof_timestamping_names[][ETH_GSTRING_LEN]; +extern const char ts_tx_type_names[][ETH_GSTRING_LEN]; +extern const char ts_rx_filter_names[][ETH_GSTRING_LEN]; +extern const char ts_flags_names[][ETH_GSTRING_LEN]; +extern const char udp_tunnel_type_names[][ETH_GSTRING_LEN]; + +int __ethtool_get_link(struct net_device *dev); + +bool convert_legacy_settings_to_link_ksettings( + struct ethtool_link_ksettings *link_ksettings, + const struct ethtool_cmd *legacy_settings); +int ethtool_check_max_channel(struct net_device *dev, + struct ethtool_channels channels, + struct genl_info *info); +struct ethtool_rxfh_context * +ethtool_rxfh_ctx_alloc(const struct ethtool_ops *ops, + u32 indir_size, u32 key_size); +int ethtool_check_rss_ctx_busy(struct net_device *dev, u32 rss_context); +int ethtool_rxfh_config_is_sym(u64 rxfh); + +void ethtool_ringparam_get_cfg(struct net_device *dev, + struct ethtool_ringparam *param, + struct kernel_ethtool_ringparam *kparam, + struct netlink_ext_ack *extack); + +int ethtool_get_rx_ring_count(struct net_device *dev); + +int __ethtool_get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info *info); +int ethtool_get_ts_info_by_phc(struct net_device *dev, + struct kernel_ethtool_ts_info *info, + struct hwtstamp_provider_desc *hwprov_desc); +int ethtool_net_get_ts_info_by_phc(struct net_device *dev, + struct kernel_ethtool_ts_info *info, + struct 
hwtstamp_provider_desc *hwprov_desc); +struct phy_device * +ethtool_phy_get_ts_info_by_phc(struct net_device *dev, + struct kernel_ethtool_ts_info *info, + struct hwtstamp_provider_desc *hwprov_desc); +bool net_support_hwtstamp_qualifier(struct net_device *dev, + enum hwtstamp_provider_qualifier qualifier); + +extern const struct ethtool_phy_ops *ethtool_phy_ops; +extern const struct ethtool_pse_ops *ethtool_pse_ops; + +int ethtool_get_module_info_call(struct net_device *dev, + struct ethtool_modinfo *modinfo); +int ethtool_get_module_eeprom_call(struct net_device *dev, + struct ethtool_eeprom *ee, u8 *data); + +bool __ethtool_dev_mm_supported(struct net_device *dev); + +#if IS_ENABLED(CONFIG_ETHTOOL_NETLINK) +void ethtool_rss_notify(struct net_device *dev, u32 type, u32 rss_context); +#else +static inline void +ethtool_rss_notify(struct net_device *dev, u32 type, u32 rss_context) +{ +} +#endif + +#endif /* _ETHTOOL_COMMON_H */ diff --git a/net/ethtool/debug.c b/net/ethtool/debug.c new file mode 100644 index 000000000000..0b2dea56d461 --- /dev/null +++ b/net/ethtool/debug.c @@ -0,0 +1,117 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include "netlink.h" +#include "common.h" +#include "bitset.h" + +struct debug_req_info { + struct ethnl_req_info base; +}; + +struct debug_reply_data { + struct ethnl_reply_data base; + u32 msg_mask; +}; + +#define DEBUG_REPDATA(__reply_base) \ + container_of(__reply_base, struct debug_reply_data, base) + +const struct nla_policy ethnl_debug_get_policy[] = { + [ETHTOOL_A_DEBUG_HEADER] = + NLA_POLICY_NESTED(ethnl_header_policy), +}; + +static int debug_prepare_data(const struct ethnl_req_info *req_base, + struct ethnl_reply_data *reply_base, + const struct genl_info *info) +{ + struct debug_reply_data *data = DEBUG_REPDATA(reply_base); + struct net_device *dev = reply_base->dev; + int ret; + + if (!dev->ethtool_ops->get_msglevel) + return -EOPNOTSUPP; + + ret = ethnl_ops_begin(dev); + if (ret < 0) + return ret; + data->msg_mask = dev->ethtool_ops->get_msglevel(dev); + ethnl_ops_complete(dev); + + return 0; +} + +static int debug_reply_size(const struct ethnl_req_info *req_base, + const struct ethnl_reply_data *reply_base) +{ + const struct debug_reply_data *data = DEBUG_REPDATA(reply_base); + bool compact = req_base->flags & ETHTOOL_FLAG_COMPACT_BITSETS; + + return ethnl_bitset32_size(&data->msg_mask, NULL, NETIF_MSG_CLASS_COUNT, + netif_msg_class_names, compact); +} + +static int debug_fill_reply(struct sk_buff *skb, + const struct ethnl_req_info *req_base, + const struct ethnl_reply_data *reply_base) +{ + const struct debug_reply_data *data = DEBUG_REPDATA(reply_base); + bool compact = req_base->flags & ETHTOOL_FLAG_COMPACT_BITSETS; + + return ethnl_put_bitset32(skb, ETHTOOL_A_DEBUG_MSGMASK, &data->msg_mask, + NULL, NETIF_MSG_CLASS_COUNT, + netif_msg_class_names, compact); +} + +/* DEBUG_SET */ + +const struct nla_policy ethnl_debug_set_policy[] = { + [ETHTOOL_A_DEBUG_HEADER] = + NLA_POLICY_NESTED(ethnl_header_policy), + [ETHTOOL_A_DEBUG_MSGMASK] = { .type = NLA_NESTED }, +}; + +static int +ethnl_set_debug_validate(struct ethnl_req_info *req_info, + struct genl_info *info) +{ + const struct ethtool_ops *ops = req_info->dev->ethtool_ops; + + return ops->get_msglevel && ops->set_msglevel ? 
1 : -EOPNOTSUPP; +} + +static int +ethnl_set_debug(struct ethnl_req_info *req_info, struct genl_info *info) +{ + struct net_device *dev = req_info->dev; + struct nlattr **tb = info->attrs; + bool mod = false; + u32 msg_mask; + int ret; + + msg_mask = dev->ethtool_ops->get_msglevel(dev); + ret = ethnl_update_bitset32(&msg_mask, NETIF_MSG_CLASS_COUNT, + tb[ETHTOOL_A_DEBUG_MSGMASK], + netif_msg_class_names, info->extack, &mod); + if (ret < 0 || !mod) + return ret; + + dev->ethtool_ops->set_msglevel(dev, msg_mask); + return 1; +} + +const struct ethnl_request_ops ethnl_debug_request_ops = { + .request_cmd = ETHTOOL_MSG_DEBUG_GET, + .reply_cmd = ETHTOOL_MSG_DEBUG_GET_REPLY, + .hdr_attr = ETHTOOL_A_DEBUG_HEADER, + .req_info_size = sizeof(struct debug_req_info), + .reply_data_size = sizeof(struct debug_reply_data), + + .prepare_data = debug_prepare_data, + .reply_size = debug_reply_size, + .fill_reply = debug_fill_reply, + + .set_validate = ethnl_set_debug_validate, + .set = ethnl_set_debug, + .set_ntf_cmd = ETHTOOL_MSG_DEBUG_NTF, +}; diff --git a/net/ethtool/eee.c b/net/ethtool/eee.c new file mode 100644 index 000000000000..bf398973eb8a --- /dev/null +++ b/net/ethtool/eee.c @@ -0,0 +1,172 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include "netlink.h" +#include "common.h" +#include "bitset.h" + +struct eee_req_info { + struct ethnl_req_info base; +}; + +struct eee_reply_data { + struct ethnl_reply_data base; + struct ethtool_keee eee; +}; + +#define EEE_REPDATA(__reply_base) \ + container_of(__reply_base, struct eee_reply_data, base) + +const struct nla_policy ethnl_eee_get_policy[] = { + [ETHTOOL_A_EEE_HEADER] = + NLA_POLICY_NESTED(ethnl_header_policy), +}; + +static int eee_prepare_data(const struct ethnl_req_info *req_base, + struct ethnl_reply_data *reply_base, + const struct genl_info *info) +{ + struct eee_reply_data *data = EEE_REPDATA(reply_base); + struct net_device *dev = reply_base->dev; + struct ethtool_keee *eee = &data->eee; + int ret; + + if (!dev->ethtool_ops->get_eee) + return -EOPNOTSUPP; + ret = ethnl_ops_begin(dev); + if (ret < 0) + return ret; + ret = dev->ethtool_ops->get_eee(dev, eee); + ethnl_ops_complete(dev); + + return ret; +} + +static int eee_reply_size(const struct ethnl_req_info *req_base, + const struct ethnl_reply_data *reply_base) +{ + bool compact = req_base->flags & ETHTOOL_FLAG_COMPACT_BITSETS; + const struct eee_reply_data *data = EEE_REPDATA(reply_base); + const struct ethtool_keee *eee = &data->eee; + int len = 0; + int ret; + + /* MODES_OURS */ + ret = ethnl_bitset_size(eee->advertised, eee->supported, + __ETHTOOL_LINK_MODE_MASK_NBITS, + link_mode_names, compact); + if (ret < 0) + return ret; + len += ret; + /* MODES_PEERS */ + ret = ethnl_bitset_size(eee->lp_advertised, NULL, + __ETHTOOL_LINK_MODE_MASK_NBITS, + link_mode_names, compact); + if (ret < 0) + return ret; + len += ret; + + len += nla_total_size(sizeof(u8)) + /* _EEE_ACTIVE */ + nla_total_size(sizeof(u8)) + /* _EEE_ENABLED */ + nla_total_size(sizeof(u8)) + /* _EEE_TX_LPI_ENABLED */ + nla_total_size(sizeof(u32)); /* _EEE_TX_LPI_TIMER */ + + return len; +} + +static int eee_fill_reply(struct sk_buff *skb, + const struct ethnl_req_info *req_base, + const struct ethnl_reply_data *reply_base) +{ + bool compact = req_base->flags & ETHTOOL_FLAG_COMPACT_BITSETS; + const struct eee_reply_data *data = EEE_REPDATA(reply_base); + const struct ethtool_keee *eee = &data->eee; + int ret; + + ret = ethnl_put_bitset(skb, ETHTOOL_A_EEE_MODES_OURS, + eee->advertised, eee->supported, + 
__ETHTOOL_LINK_MODE_MASK_NBITS, + link_mode_names, compact); + if (ret < 0) + return ret; + ret = ethnl_put_bitset(skb, ETHTOOL_A_EEE_MODES_PEER, + eee->lp_advertised, NULL, + __ETHTOOL_LINK_MODE_MASK_NBITS, + link_mode_names, compact); + if (ret < 0) + return ret; + + if (nla_put_u8(skb, ETHTOOL_A_EEE_ACTIVE, eee->eee_active) || + nla_put_u8(skb, ETHTOOL_A_EEE_ENABLED, eee->eee_enabled) || + nla_put_u8(skb, ETHTOOL_A_EEE_TX_LPI_ENABLED, + eee->tx_lpi_enabled) || + nla_put_u32(skb, ETHTOOL_A_EEE_TX_LPI_TIMER, eee->tx_lpi_timer)) + return -EMSGSIZE; + + return 0; +} + +/* EEE_SET */ + +const struct nla_policy ethnl_eee_set_policy[] = { + [ETHTOOL_A_EEE_HEADER] = + NLA_POLICY_NESTED(ethnl_header_policy), + [ETHTOOL_A_EEE_MODES_OURS] = { .type = NLA_NESTED }, + [ETHTOOL_A_EEE_ENABLED] = { .type = NLA_U8 }, + [ETHTOOL_A_EEE_TX_LPI_ENABLED] = { .type = NLA_U8 }, + [ETHTOOL_A_EEE_TX_LPI_TIMER] = { .type = NLA_U32 }, +}; + +static int +ethnl_set_eee_validate(struct ethnl_req_info *req_info, struct genl_info *info) +{ + const struct ethtool_ops *ops = req_info->dev->ethtool_ops; + + return ops->get_eee && ops->set_eee ? 1 : -EOPNOTSUPP; +} + +static int +ethnl_set_eee(struct ethnl_req_info *req_info, struct genl_info *info) +{ + struct net_device *dev = req_info->dev; + struct nlattr **tb = info->attrs; + struct ethtool_keee eee = {}; + bool mod = false; + int ret; + + ret = dev->ethtool_ops->get_eee(dev, &eee); + if (ret < 0) + return ret; + + ret = ethnl_update_bitset(eee.advertised, + __ETHTOOL_LINK_MODE_MASK_NBITS, + tb[ETHTOOL_A_EEE_MODES_OURS], + link_mode_names, info->extack, &mod); + if (ret < 0) + return ret; + ethnl_update_bool(&eee.eee_enabled, tb[ETHTOOL_A_EEE_ENABLED], &mod); + ethnl_update_bool(&eee.tx_lpi_enabled, tb[ETHTOOL_A_EEE_TX_LPI_ENABLED], + &mod); + ethnl_update_u32(&eee.tx_lpi_timer, tb[ETHTOOL_A_EEE_TX_LPI_TIMER], + &mod); + if (!mod) + return 0; + + ret = dev->ethtool_ops->set_eee(dev, &eee); + return ret < 0 ? 
ret : 1; +} + +const struct ethnl_request_ops ethnl_eee_request_ops = { + .request_cmd = ETHTOOL_MSG_EEE_GET, + .reply_cmd = ETHTOOL_MSG_EEE_GET_REPLY, + .hdr_attr = ETHTOOL_A_EEE_HEADER, + .req_info_size = sizeof(struct eee_req_info), + .reply_data_size = sizeof(struct eee_reply_data), + + .prepare_data = eee_prepare_data, + .reply_size = eee_reply_size, + .fill_reply = eee_fill_reply, + + .set_validate = ethnl_set_eee_validate, + .set = ethnl_set_eee, + .set_ntf_cmd = ETHTOOL_MSG_EEE_NTF, +}; diff --git a/net/ethtool/eeprom.c b/net/ethtool/eeprom.c new file mode 100644 index 000000000000..3b8209e930fd --- /dev/null +++ b/net/ethtool/eeprom.c @@ -0,0 +1,246 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include <linux/ethtool.h> +#include <linux/sfp.h> +#include "netlink.h" +#include "common.h" + +struct eeprom_req_info { + struct ethnl_req_info base; + u32 offset; + u32 length; + u8 page; + u8 bank; + u8 i2c_address; +}; + +struct eeprom_reply_data { + struct ethnl_reply_data base; + u32 length; + u8 *data; +}; + +#define MODULE_EEPROM_REQINFO(__req_base) \ + container_of(__req_base, struct eeprom_req_info, base) + +#define MODULE_EEPROM_REPDATA(__reply_base) \ + container_of(__reply_base, struct eeprom_reply_data, base) + +static int fallback_set_params(struct eeprom_req_info *request, + struct ethtool_modinfo *modinfo, + struct ethtool_eeprom *eeprom) +{ + u32 offset = request->offset; + u32 length = request->length; + + if (request->page) + offset = request->page * ETH_MODULE_EEPROM_PAGE_LEN + offset; + + if (modinfo->type == ETH_MODULE_SFF_8472 && + request->i2c_address == 0x51) + offset += ETH_MODULE_EEPROM_PAGE_LEN * 2; + + if (offset >= modinfo->eeprom_len) + return -EINVAL; + + eeprom->cmd = ETHTOOL_GMODULEEEPROM; + eeprom->len = length; + eeprom->offset = offset; + + return 0; +} + +static int eeprom_fallback(struct eeprom_req_info *request, + struct eeprom_reply_data *reply) +{ + struct net_device *dev = reply->base.dev; + struct ethtool_modinfo modinfo = {0}; + struct ethtool_eeprom eeprom = {0}; + u8 *data; + int err; + + modinfo.cmd = ETHTOOL_GMODULEINFO; + err = ethtool_get_module_info_call(dev, &modinfo); + if (err < 0) + return err; + + err = fallback_set_params(request, &modinfo, &eeprom); + if (err < 0) + return err; + + data = kmalloc(eeprom.len, GFP_KERNEL); + if (!data) + return -ENOMEM; + err = ethtool_get_module_eeprom_call(dev, &eeprom, data); + if (err < 0) + goto err_out; + + reply->data = data; + reply->length = eeprom.len; + + return 0; + +err_out: + kfree(data); + return err; +} + +static int get_module_eeprom_by_page(struct net_device *dev, + struct ethtool_module_eeprom *page_data, + struct netlink_ext_ack *extack) +{ + const struct ethtool_ops *ops = dev->ethtool_ops; + + if (dev->ethtool->module_fw_flash_in_progress) { + NL_SET_ERR_MSG(extack, + "Module firmware flashing is in progress"); + return -EBUSY; + } + + if (dev->sfp_bus) + return sfp_get_module_eeprom_by_page(dev->sfp_bus, page_data, extack); + + if (ops->get_module_eeprom_by_page) + return ops->get_module_eeprom_by_page(dev, page_data, extack); + + return -EOPNOTSUPP; +} + +static int eeprom_prepare_data(const struct ethnl_req_info *req_base, + struct ethnl_reply_data *reply_base, + const struct genl_info *info) +{ + struct eeprom_reply_data *reply = MODULE_EEPROM_REPDATA(reply_base); + struct eeprom_req_info *request = MODULE_EEPROM_REQINFO(req_base); + struct ethtool_module_eeprom page_data = {0}; + struct net_device *dev = reply_base->dev; + int ret; + + page_data.offset = request->offset; 
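/* Note on the fallback path above: when a driver only implements the
 * legacy ETHTOOL_GMODULEEEPROM interface, fallback_set_params() folds the
 * netlink (page, offset) pair into one flat offset.  With
 * ETH_MODULE_EEPROM_PAGE_LEN assumed to be 128, a request for
 * page = 1, offset = 130 becomes
 *
 *	offset = 1 * 128 + 130 = 258
 *
 * and an SFF-8472 module addressed at I2C 0x51 gets a further
 * 2 * 128 bytes added, the assumption being that the legacy dump places
 * the 256-byte 0x50 (A0h) space ahead of the 0x51 (A2h) diagnostics page.
 */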
+ page_data.length = request->length; + page_data.i2c_address = request->i2c_address; + page_data.page = request->page; + page_data.bank = request->bank; + page_data.data = kmalloc(page_data.length, GFP_KERNEL); + if (!page_data.data) + return -ENOMEM; + + ret = ethnl_ops_begin(dev); + if (ret) + goto err_free; + + ret = get_module_eeprom_by_page(dev, &page_data, info->extack); + if (ret < 0) + goto err_ops; + + reply->length = ret; + reply->data = page_data.data; + + ethnl_ops_complete(dev); + return 0; + +err_ops: + ethnl_ops_complete(dev); +err_free: + kfree(page_data.data); + + if (ret == -EOPNOTSUPP) + return eeprom_fallback(request, reply); + return ret; +} + +static int eeprom_parse_request(struct ethnl_req_info *req_info, struct nlattr **tb, + struct netlink_ext_ack *extack) +{ + struct eeprom_req_info *request = MODULE_EEPROM_REQINFO(req_info); + + if (!tb[ETHTOOL_A_MODULE_EEPROM_OFFSET] || + !tb[ETHTOOL_A_MODULE_EEPROM_LENGTH] || + !tb[ETHTOOL_A_MODULE_EEPROM_PAGE] || + !tb[ETHTOOL_A_MODULE_EEPROM_I2C_ADDRESS]) + return -EINVAL; + + request->i2c_address = nla_get_u8(tb[ETHTOOL_A_MODULE_EEPROM_I2C_ADDRESS]); + request->offset = nla_get_u32(tb[ETHTOOL_A_MODULE_EEPROM_OFFSET]); + request->length = nla_get_u32(tb[ETHTOOL_A_MODULE_EEPROM_LENGTH]); + + /* The following set of conditions limit the API to only dump 1/2 + * EEPROM page without crossing low page boundary located at offset 128. + * This means user may only request dumps of length limited to 128 from + * either low 128 bytes or high 128 bytes. + * For pages higher than 0 only high 128 bytes are accessible. + */ + request->page = nla_get_u8(tb[ETHTOOL_A_MODULE_EEPROM_PAGE]); + if (request->page && request->offset < ETH_MODULE_EEPROM_PAGE_LEN) { + NL_SET_ERR_MSG_ATTR(extack, tb[ETHTOOL_A_MODULE_EEPROM_PAGE], + "reading from lower half page is allowed for page 0 only"); + return -EINVAL; + } + + if (request->offset < ETH_MODULE_EEPROM_PAGE_LEN && + request->offset + request->length > ETH_MODULE_EEPROM_PAGE_LEN) { + NL_SET_ERR_MSG_ATTR(extack, tb[ETHTOOL_A_MODULE_EEPROM_LENGTH], + "reading cross half page boundary is illegal"); + return -EINVAL; + } else if (request->offset + request->length > ETH_MODULE_EEPROM_PAGE_LEN * 2) { + NL_SET_ERR_MSG_ATTR(extack, tb[ETHTOOL_A_MODULE_EEPROM_LENGTH], + "reading cross page boundary is illegal"); + return -EINVAL; + } + + if (tb[ETHTOOL_A_MODULE_EEPROM_BANK]) + request->bank = nla_get_u8(tb[ETHTOOL_A_MODULE_EEPROM_BANK]); + + return 0; +} + +static int eeprom_reply_size(const struct ethnl_req_info *req_base, + const struct ethnl_reply_data *reply_base) +{ + const struct eeprom_req_info *request = MODULE_EEPROM_REQINFO(req_base); + + return nla_total_size(sizeof(u8) * request->length); /* _EEPROM_DATA */ +} + +static int eeprom_fill_reply(struct sk_buff *skb, + const struct ethnl_req_info *req_base, + const struct ethnl_reply_data *reply_base) +{ + struct eeprom_reply_data *reply = MODULE_EEPROM_REPDATA(reply_base); + + return nla_put(skb, ETHTOOL_A_MODULE_EEPROM_DATA, reply->length, reply->data); +} + +static void eeprom_cleanup_data(struct ethnl_reply_data *reply_base) +{ + struct eeprom_reply_data *reply = MODULE_EEPROM_REPDATA(reply_base); + + kfree(reply->data); +} + +const struct ethnl_request_ops ethnl_module_eeprom_request_ops = { + .request_cmd = ETHTOOL_MSG_MODULE_EEPROM_GET, + .reply_cmd = ETHTOOL_MSG_MODULE_EEPROM_GET_REPLY, + .hdr_attr = ETHTOOL_A_MODULE_EEPROM_HEADER, + .req_info_size = sizeof(struct eeprom_req_info), + .reply_data_size = sizeof(struct eeprom_reply_data), + + 
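/* Concrete effect of the eeprom_parse_request() checks above, taking
 * ETH_MODULE_EEPROM_PAGE_LEN as 128:
 *
 *	page 0, offset   0, length 128  ->  accepted (low half of page 0)
 *	page 0, offset 100, length  64  ->  rejected, crosses the half-page
 *					    boundary at offset 128
 *	page 2, offset  10, length  16  ->  rejected, pages above 0 expose
 *					    only offsets 128..255
 *	page 2, offset 128, length 128  ->  accepted (whole upper half)
 */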
.parse_request = eeprom_parse_request, + .prepare_data = eeprom_prepare_data, + .reply_size = eeprom_reply_size, + .fill_reply = eeprom_fill_reply, + .cleanup_data = eeprom_cleanup_data, +}; + +const struct nla_policy ethnl_module_eeprom_get_policy[] = { + [ETHTOOL_A_MODULE_EEPROM_HEADER] = NLA_POLICY_NESTED(ethnl_header_policy), + [ETHTOOL_A_MODULE_EEPROM_OFFSET] = + NLA_POLICY_MAX(NLA_U32, ETH_MODULE_EEPROM_PAGE_LEN * 2 - 1), + [ETHTOOL_A_MODULE_EEPROM_LENGTH] = + NLA_POLICY_RANGE(NLA_U32, 1, ETH_MODULE_EEPROM_PAGE_LEN), + [ETHTOOL_A_MODULE_EEPROM_PAGE] = { .type = NLA_U8 }, + [ETHTOOL_A_MODULE_EEPROM_BANK] = { .type = NLA_U8 }, + [ETHTOOL_A_MODULE_EEPROM_I2C_ADDRESS] = + NLA_POLICY_RANGE(NLA_U8, 0, ETH_MODULE_MAX_I2C_ADDRESS), +}; + diff --git a/net/ethtool/features.c b/net/ethtool/features.c new file mode 100644 index 000000000000..f2217983be2b --- /dev/null +++ b/net/ethtool/features.c @@ -0,0 +1,297 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include <net/netdev_lock.h> + +#include "netlink.h" +#include "common.h" +#include "bitset.h" + +struct features_req_info { + struct ethnl_req_info base; +}; + +struct features_reply_data { + struct ethnl_reply_data base; + u32 hw[ETHTOOL_DEV_FEATURE_WORDS]; + u32 wanted[ETHTOOL_DEV_FEATURE_WORDS]; + u32 active[ETHTOOL_DEV_FEATURE_WORDS]; + u32 nochange[ETHTOOL_DEV_FEATURE_WORDS]; + u32 all[ETHTOOL_DEV_FEATURE_WORDS]; +}; + +#define FEATURES_REPDATA(__reply_base) \ + container_of(__reply_base, struct features_reply_data, base) + +const struct nla_policy ethnl_features_get_policy[] = { + [ETHTOOL_A_FEATURES_HEADER] = + NLA_POLICY_NESTED(ethnl_header_policy), +}; + +static void ethnl_features_to_bitmap32(u32 *dest, netdev_features_t src) +{ + unsigned int i; + + for (i = 0; i < ETHTOOL_DEV_FEATURE_WORDS; i++) + dest[i] = src >> (32 * i); +} + +static int features_prepare_data(const struct ethnl_req_info *req_base, + struct ethnl_reply_data *reply_base, + const struct genl_info *info) +{ + struct features_reply_data *data = FEATURES_REPDATA(reply_base); + struct net_device *dev = reply_base->dev; + netdev_features_t all_features; + + ethnl_features_to_bitmap32(data->hw, dev->hw_features); + ethnl_features_to_bitmap32(data->wanted, dev->wanted_features); + ethnl_features_to_bitmap32(data->active, dev->features); + ethnl_features_to_bitmap32(data->nochange, NETIF_F_NEVER_CHANGE); + all_features = GENMASK_ULL(NETDEV_FEATURE_COUNT - 1, 0); + ethnl_features_to_bitmap32(data->all, all_features); + + return 0; +} + +static int features_reply_size(const struct ethnl_req_info *req_base, + const struct ethnl_reply_data *reply_base) +{ + const struct features_reply_data *data = FEATURES_REPDATA(reply_base); + bool compact = req_base->flags & ETHTOOL_FLAG_COMPACT_BITSETS; + unsigned int len = 0; + int ret; + + ret = ethnl_bitset32_size(data->hw, data->all, NETDEV_FEATURE_COUNT, + netdev_features_strings, compact); + if (ret < 0) + return ret; + len += ret; + ret = ethnl_bitset32_size(data->wanted, NULL, NETDEV_FEATURE_COUNT, + netdev_features_strings, compact); + if (ret < 0) + return ret; + len += ret; + ret = ethnl_bitset32_size(data->active, NULL, NETDEV_FEATURE_COUNT, + netdev_features_strings, compact); + if (ret < 0) + return ret; + len += ret; + ret = ethnl_bitset32_size(data->nochange, NULL, NETDEV_FEATURE_COUNT, + netdev_features_strings, compact); + if (ret < 0) + return ret; + len += ret; + + return len; +} + +static int features_fill_reply(struct sk_buff *skb, + const struct ethnl_req_info *req_base, + const struct ethnl_reply_data 
*reply_base) +{ + const struct features_reply_data *data = FEATURES_REPDATA(reply_base); + bool compact = req_base->flags & ETHTOOL_FLAG_COMPACT_BITSETS; + int ret; + + ret = ethnl_put_bitset32(skb, ETHTOOL_A_FEATURES_HW, data->hw, + data->all, NETDEV_FEATURE_COUNT, + netdev_features_strings, compact); + if (ret < 0) + return ret; + ret = ethnl_put_bitset32(skb, ETHTOOL_A_FEATURES_WANTED, data->wanted, + NULL, NETDEV_FEATURE_COUNT, + netdev_features_strings, compact); + if (ret < 0) + return ret; + ret = ethnl_put_bitset32(skb, ETHTOOL_A_FEATURES_ACTIVE, data->active, + NULL, NETDEV_FEATURE_COUNT, + netdev_features_strings, compact); + if (ret < 0) + return ret; + return ethnl_put_bitset32(skb, ETHTOOL_A_FEATURES_NOCHANGE, + data->nochange, NULL, NETDEV_FEATURE_COUNT, + netdev_features_strings, compact); +} + +const struct ethnl_request_ops ethnl_features_request_ops = { + .request_cmd = ETHTOOL_MSG_FEATURES_GET, + .reply_cmd = ETHTOOL_MSG_FEATURES_GET_REPLY, + .hdr_attr = ETHTOOL_A_FEATURES_HEADER, + .req_info_size = sizeof(struct features_req_info), + .reply_data_size = sizeof(struct features_reply_data), + + .prepare_data = features_prepare_data, + .reply_size = features_reply_size, + .fill_reply = features_fill_reply, +}; + +/* FEATURES_SET */ + +const struct nla_policy ethnl_features_set_policy[] = { + [ETHTOOL_A_FEATURES_HEADER] = + NLA_POLICY_NESTED(ethnl_header_policy), + [ETHTOOL_A_FEATURES_WANTED] = { .type = NLA_NESTED }, +}; + +static void ethnl_features_to_bitmap(unsigned long *dest, netdev_features_t val) +{ + const unsigned int words = BITS_TO_LONGS(NETDEV_FEATURE_COUNT); + unsigned int i; + + for (i = 0; i < words; i++) + dest[i] = (unsigned long)(val >> (i * BITS_PER_LONG)); +} + +static netdev_features_t ethnl_bitmap_to_features(unsigned long *src) +{ + const unsigned int nft_bits = sizeof(netdev_features_t) * BITS_PER_BYTE; + const unsigned int words = BITS_TO_LONGS(NETDEV_FEATURE_COUNT); + netdev_features_t ret = 0; + unsigned int i; + + for (i = 0; i < words; i++) + ret |= (netdev_features_t)(src[i]) << (i * BITS_PER_LONG); + ret &= ~(netdev_features_t)0 >> (nft_bits - NETDEV_FEATURE_COUNT); + return ret; +} + +static int features_send_reply(struct net_device *dev, struct genl_info *info, + const unsigned long *wanted, + const unsigned long *wanted_mask, + const unsigned long *active, + const unsigned long *active_mask, bool compact) +{ + struct sk_buff *rskb; + void *reply_payload; + int reply_len = 0; + int ret; + + reply_len = ethnl_reply_header_size(); + ret = ethnl_bitset_size(wanted, wanted_mask, NETDEV_FEATURE_COUNT, + netdev_features_strings, compact); + if (ret < 0) + goto err; + reply_len += ret; + ret = ethnl_bitset_size(active, active_mask, NETDEV_FEATURE_COUNT, + netdev_features_strings, compact); + if (ret < 0) + goto err; + reply_len += ret; + + ret = -ENOMEM; + rskb = ethnl_reply_init(reply_len, dev, ETHTOOL_MSG_FEATURES_SET_REPLY, + ETHTOOL_A_FEATURES_HEADER, info, + &reply_payload); + if (!rskb) + goto err; + + ret = ethnl_put_bitset(rskb, ETHTOOL_A_FEATURES_WANTED, wanted, + wanted_mask, NETDEV_FEATURE_COUNT, + netdev_features_strings, compact); + if (ret < 0) + goto nla_put_failure; + ret = ethnl_put_bitset(rskb, ETHTOOL_A_FEATURES_ACTIVE, active, + active_mask, NETDEV_FEATURE_COUNT, + netdev_features_strings, compact); + if (ret < 0) + goto nla_put_failure; + + genlmsg_end(rskb, reply_payload); + ret = genlmsg_reply(rskb, info); + return ret; + +nla_put_failure: + nlmsg_free(rskb); + WARN_ONCE(1, "calculated message payload length (%d) not 
sufficient\n", + reply_len); +err: + GENL_SET_ERR_MSG(info, "failed to send reply message"); + return ret; +} + +int ethnl_set_features(struct sk_buff *skb, struct genl_info *info) +{ + DECLARE_BITMAP(wanted_diff_mask, NETDEV_FEATURE_COUNT); + DECLARE_BITMAP(active_diff_mask, NETDEV_FEATURE_COUNT); + DECLARE_BITMAP(old_active, NETDEV_FEATURE_COUNT); + DECLARE_BITMAP(old_wanted, NETDEV_FEATURE_COUNT); + DECLARE_BITMAP(new_active, NETDEV_FEATURE_COUNT); + DECLARE_BITMAP(new_wanted, NETDEV_FEATURE_COUNT); + DECLARE_BITMAP(req_wanted, NETDEV_FEATURE_COUNT); + DECLARE_BITMAP(req_mask, NETDEV_FEATURE_COUNT); + struct ethnl_req_info req_info = {}; + struct nlattr **tb = info->attrs; + struct net_device *dev; + bool mod; + int ret; + + if (!tb[ETHTOOL_A_FEATURES_WANTED]) + return -EINVAL; + ret = ethnl_parse_header_dev_get(&req_info, + tb[ETHTOOL_A_FEATURES_HEADER], + genl_info_net(info), info->extack, + true); + if (ret < 0) + return ret; + dev = req_info.dev; + + rtnl_lock(); + netdev_lock_ops(dev); + ret = ethnl_ops_begin(dev); + if (ret < 0) + goto out_unlock; + ethnl_features_to_bitmap(old_active, dev->features); + ethnl_features_to_bitmap(old_wanted, dev->wanted_features); + ret = ethnl_parse_bitset(req_wanted, req_mask, NETDEV_FEATURE_COUNT, + tb[ETHTOOL_A_FEATURES_WANTED], + netdev_features_strings, info->extack); + if (ret < 0) + goto out_ops; + if (ethnl_bitmap_to_features(req_mask) & ~NETIF_F_ETHTOOL_BITS) { + GENL_SET_ERR_MSG(info, "attempt to change non-ethtool features"); + ret = -EINVAL; + goto out_ops; + } + + /* set req_wanted bits not in req_mask from old_wanted */ + bitmap_and(req_wanted, req_wanted, req_mask, NETDEV_FEATURE_COUNT); + bitmap_andnot(new_wanted, old_wanted, req_mask, NETDEV_FEATURE_COUNT); + bitmap_or(req_wanted, new_wanted, req_wanted, NETDEV_FEATURE_COUNT); + if (!bitmap_equal(req_wanted, old_wanted, NETDEV_FEATURE_COUNT)) { + dev->wanted_features &= ~dev->hw_features; + dev->wanted_features |= ethnl_bitmap_to_features(req_wanted) & dev->hw_features; + __netdev_update_features(dev); + } + ethnl_features_to_bitmap(new_active, dev->features); + mod = !bitmap_equal(old_active, new_active, NETDEV_FEATURE_COUNT); + + ret = 0; + if (!(req_info.flags & ETHTOOL_FLAG_OMIT_REPLY)) { + bool compact = req_info.flags & ETHTOOL_FLAG_COMPACT_BITSETS; + + bitmap_xor(wanted_diff_mask, req_wanted, new_active, + NETDEV_FEATURE_COUNT); + bitmap_xor(active_diff_mask, old_active, new_active, + NETDEV_FEATURE_COUNT); + bitmap_and(wanted_diff_mask, wanted_diff_mask, req_mask, + NETDEV_FEATURE_COUNT); + bitmap_and(req_wanted, req_wanted, wanted_diff_mask, + NETDEV_FEATURE_COUNT); + bitmap_and(new_active, new_active, active_diff_mask, + NETDEV_FEATURE_COUNT); + + ret = features_send_reply(dev, info, req_wanted, + wanted_diff_mask, new_active, + active_diff_mask, compact); + } + if (mod) + netdev_features_change(dev); + +out_ops: + ethnl_ops_complete(dev); +out_unlock: + netdev_unlock_ops(dev); + rtnl_unlock(); + ethnl_parse_header_dev_put(&req_info); + return ret; +} diff --git a/net/ethtool/fec.c b/net/ethtool/fec.c new file mode 100644 index 000000000000..4669e74cbcaa --- /dev/null +++ b/net/ethtool/fec.c @@ -0,0 +1,364 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include "netlink.h" +#include "common.h" +#include "bitset.h" + +struct fec_req_info { + struct ethnl_req_info base; +}; + +struct fec_reply_data { + struct ethnl_reply_data base; + __ETHTOOL_DECLARE_LINK_MODE_MASK(fec_link_modes); + u32 active_fec; + u8 fec_auto; + struct fec_stat_grp { + u64 stats[1 + 
ETHTOOL_MAX_LANES]; + u8 cnt; + } corr, uncorr, corr_bits; + struct ethtool_fec_hist fec_stat_hist; +}; + +#define FEC_REPDATA(__reply_base) \ + container_of(__reply_base, struct fec_reply_data, base) + +#define ETHTOOL_FEC_MASK ((ETHTOOL_FEC_LLRS << 1) - 1) + +const struct nla_policy ethnl_fec_get_policy[ETHTOOL_A_FEC_HEADER + 1] = { + [ETHTOOL_A_FEC_HEADER] = NLA_POLICY_NESTED(ethnl_header_policy_stats), +}; + +static void +ethtool_fec_to_link_modes(u32 fec, unsigned long *link_modes, u8 *fec_auto) +{ + if (fec_auto) + *fec_auto = !!(fec & ETHTOOL_FEC_AUTO); + + if (fec & ETHTOOL_FEC_OFF) + __set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, link_modes); + if (fec & ETHTOOL_FEC_RS) + __set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, link_modes); + if (fec & ETHTOOL_FEC_BASER) + __set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, link_modes); + if (fec & ETHTOOL_FEC_LLRS) + __set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT, link_modes); +} + +static int +ethtool_link_modes_to_fecparam(struct ethtool_fecparam *fec, + unsigned long *link_modes, u8 fec_auto) +{ + memset(fec, 0, sizeof(*fec)); + + if (fec_auto) + fec->fec |= ETHTOOL_FEC_AUTO; + + if (__test_and_clear_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, link_modes)) + fec->fec |= ETHTOOL_FEC_OFF; + if (__test_and_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, link_modes)) + fec->fec |= ETHTOOL_FEC_RS; + if (__test_and_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, link_modes)) + fec->fec |= ETHTOOL_FEC_BASER; + if (__test_and_clear_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT, link_modes)) + fec->fec |= ETHTOOL_FEC_LLRS; + + if (!bitmap_empty(link_modes, __ETHTOOL_LINK_MODE_MASK_NBITS)) + return -EINVAL; + + return 0; +} + +static void +fec_stats_recalc(struct fec_stat_grp *grp, struct ethtool_fec_stat *stats) +{ + int i; + + if (stats->lanes[0] == ETHTOOL_STAT_NOT_SET) { + grp->stats[0] = stats->total; + grp->cnt = stats->total != ETHTOOL_STAT_NOT_SET; + return; + } + + grp->cnt = 1; + grp->stats[0] = 0; + for (i = 0; i < ETHTOOL_MAX_LANES; i++) { + if (stats->lanes[i] == ETHTOOL_STAT_NOT_SET) + break; + + grp->stats[0] += stats->lanes[i]; + grp->stats[grp->cnt++] = stats->lanes[i]; + } +} + +static int fec_prepare_data(const struct ethnl_req_info *req_base, + struct ethnl_reply_data *reply_base, + const struct genl_info *info) +{ + __ETHTOOL_DECLARE_LINK_MODE_MASK(active_fec_modes) = {}; + struct fec_reply_data *data = FEC_REPDATA(reply_base); + struct net_device *dev = reply_base->dev; + struct ethtool_fecparam fec = {}; + int ret; + + if (!dev->ethtool_ops->get_fecparam) + return -EOPNOTSUPP; + ret = ethnl_ops_begin(dev); + if (ret < 0) + return ret; + ret = dev->ethtool_ops->get_fecparam(dev, &fec); + if (ret) + goto out_complete; + if (req_base->flags & ETHTOOL_FLAG_STATS && + dev->ethtool_ops->get_fec_stats) { + struct ethtool_fec_stats stats; + + ethtool_stats_init((u64 *)&stats, sizeof(stats) / 8); + ethtool_stats_init((u64 *)data->fec_stat_hist.values, + sizeof(data->fec_stat_hist.values) / 8); + dev->ethtool_ops->get_fec_stats(dev, &stats, + &data->fec_stat_hist); + + fec_stats_recalc(&data->corr, &stats.corrected_blocks); + fec_stats_recalc(&data->uncorr, &stats.uncorrectable_blocks); + fec_stats_recalc(&data->corr_bits, &stats.corrected_bits); + } + + WARN_ON_ONCE(fec.reserved); + + ethtool_fec_to_link_modes(fec.fec, data->fec_link_modes, + &data->fec_auto); + + ethtool_fec_to_link_modes(fec.active_fec, active_fec_modes, NULL); + data->active_fec = find_first_bit(active_fec_modes, + __ETHTOOL_LINK_MODE_MASK_NBITS); + /* Don't report attr if no FEC mode set. 
Note that + * ethtool_fecparam_to_link_modes() ignores NONE and AUTO. + */ + if (data->active_fec == __ETHTOOL_LINK_MODE_MASK_NBITS) + data->active_fec = 0; + +out_complete: + ethnl_ops_complete(dev); + return ret; +} + +static int fec_reply_size(const struct ethnl_req_info *req_base, + const struct ethnl_reply_data *reply_base) +{ + bool compact = req_base->flags & ETHTOOL_FLAG_COMPACT_BITSETS; + const struct fec_reply_data *data = FEC_REPDATA(reply_base); + int len = 0; + int ret; + + ret = ethnl_bitset_size(data->fec_link_modes, NULL, + __ETHTOOL_LINK_MODE_MASK_NBITS, + link_mode_names, compact); + if (ret < 0) + return ret; + len += ret; + + len += nla_total_size(sizeof(u8)) + /* _FEC_AUTO */ + nla_total_size(sizeof(u32)); /* _FEC_ACTIVE */ + + if (req_base->flags & ETHTOOL_FLAG_STATS) { + len += 3 * nla_total_size_64bit(sizeof(u64) * + (1 + ETHTOOL_MAX_LANES)); + /* add FEC bins information */ + len += (nla_total_size(0) + /* _A_FEC_HIST */ + nla_total_size(4) + /* _A_FEC_HIST_BIN_LOW */ + nla_total_size(4) + /* _A_FEC_HIST_BIN_HI */ + /* _A_FEC_HIST_BIN_VAL + per-lane values */ + nla_total_size_64bit(sizeof(u64)) + + nla_total_size_64bit(sizeof(u64) * ETHTOOL_MAX_LANES)) * + ETHTOOL_FEC_HIST_MAX; + } + + return len; +} + +static int fec_put_hist(struct sk_buff *skb, + const struct ethtool_fec_hist *hist) +{ + const struct ethtool_fec_hist_range *ranges = hist->ranges; + const struct ethtool_fec_hist_value *values = hist->values; + struct nlattr *nest; + int i, j; + u64 sum; + + if (!ranges) + return 0; + + for (i = 0; i < ETHTOOL_FEC_HIST_MAX; i++) { + if (i && !ranges[i].low && !ranges[i].high) + break; + + if (WARN_ON_ONCE(values[i].sum == ETHTOOL_STAT_NOT_SET && + values[i].per_lane[0] == ETHTOOL_STAT_NOT_SET)) + break; + + nest = nla_nest_start(skb, ETHTOOL_A_FEC_STAT_HIST); + if (!nest) + return -EMSGSIZE; + + if (nla_put_u32(skb, ETHTOOL_A_FEC_HIST_BIN_LOW, + ranges[i].low) || + nla_put_u32(skb, ETHTOOL_A_FEC_HIST_BIN_HIGH, + ranges[i].high)) + goto err_cancel_hist; + sum = 0; + for (j = 0; j < ETHTOOL_MAX_LANES; j++) { + if (values[i].per_lane[j] == ETHTOOL_STAT_NOT_SET) + break; + sum += values[i].per_lane[j]; + } + if (nla_put_uint(skb, ETHTOOL_A_FEC_HIST_BIN_VAL, + values[i].sum == ETHTOOL_STAT_NOT_SET ? 
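+		/* If the driver did not fill the aggregate value for this bin,
+		 * fall back to the per-lane sum computed just above.
+		 */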
+ sum : values[i].sum)) + goto err_cancel_hist; + if (j && nla_put_64bit(skb, ETHTOOL_A_FEC_HIST_BIN_VAL_PER_LANE, + sizeof(u64) * j, + values[i].per_lane, + ETHTOOL_A_FEC_HIST_PAD)) + goto err_cancel_hist; + + nla_nest_end(skb, nest); + } + + return 0; + +err_cancel_hist: + nla_nest_cancel(skb, nest); + return -EMSGSIZE; +} + +static int fec_put_stats(struct sk_buff *skb, const struct fec_reply_data *data) +{ + struct nlattr *nest; + + nest = nla_nest_start(skb, ETHTOOL_A_FEC_STATS); + if (!nest) + return -EMSGSIZE; + + if (nla_put_64bit(skb, ETHTOOL_A_FEC_STAT_CORRECTED, + sizeof(u64) * data->corr.cnt, + data->corr.stats, ETHTOOL_A_FEC_STAT_PAD) || + nla_put_64bit(skb, ETHTOOL_A_FEC_STAT_UNCORR, + sizeof(u64) * data->uncorr.cnt, + data->uncorr.stats, ETHTOOL_A_FEC_STAT_PAD) || + nla_put_64bit(skb, ETHTOOL_A_FEC_STAT_CORR_BITS, + sizeof(u64) * data->corr_bits.cnt, + data->corr_bits.stats, ETHTOOL_A_FEC_STAT_PAD)) + goto err_cancel; + + if (fec_put_hist(skb, &data->fec_stat_hist)) + goto err_cancel; + + nla_nest_end(skb, nest); + return 0; + +err_cancel: + nla_nest_cancel(skb, nest); + return -EMSGSIZE; +} + +static int fec_fill_reply(struct sk_buff *skb, + const struct ethnl_req_info *req_base, + const struct ethnl_reply_data *reply_base) +{ + bool compact = req_base->flags & ETHTOOL_FLAG_COMPACT_BITSETS; + const struct fec_reply_data *data = FEC_REPDATA(reply_base); + int ret; + + ret = ethnl_put_bitset(skb, ETHTOOL_A_FEC_MODES, + data->fec_link_modes, NULL, + __ETHTOOL_LINK_MODE_MASK_NBITS, + link_mode_names, compact); + if (ret < 0) + return ret; + + if (nla_put_u8(skb, ETHTOOL_A_FEC_AUTO, data->fec_auto) || + (data->active_fec && + nla_put_u32(skb, ETHTOOL_A_FEC_ACTIVE, data->active_fec))) + return -EMSGSIZE; + + if (req_base->flags & ETHTOOL_FLAG_STATS && fec_put_stats(skb, data)) + return -EMSGSIZE; + + return 0; +} + +/* FEC_SET */ + +const struct nla_policy ethnl_fec_set_policy[ETHTOOL_A_FEC_AUTO + 1] = { + [ETHTOOL_A_FEC_HEADER] = NLA_POLICY_NESTED(ethnl_header_policy), + [ETHTOOL_A_FEC_MODES] = { .type = NLA_NESTED }, + [ETHTOOL_A_FEC_AUTO] = NLA_POLICY_MAX(NLA_U8, 1), +}; + +static int +ethnl_set_fec_validate(struct ethnl_req_info *req_info, struct genl_info *info) +{ + const struct ethtool_ops *ops = req_info->dev->ethtool_ops; + + return ops->get_fecparam && ops->set_fecparam ? 1 : -EOPNOTSUPP; +} + +static int +ethnl_set_fec(struct ethnl_req_info *req_info, struct genl_info *info) +{ + __ETHTOOL_DECLARE_LINK_MODE_MASK(fec_link_modes) = {}; + struct net_device *dev = req_info->dev; + struct nlattr **tb = info->attrs; + struct ethtool_fecparam fec = {}; + bool mod = false; + u8 fec_auto; + int ret; + + ret = dev->ethtool_ops->get_fecparam(dev, &fec); + if (ret < 0) + return ret; + + ethtool_fec_to_link_modes(fec.fec, fec_link_modes, &fec_auto); + + ret = ethnl_update_bitset(fec_link_modes, + __ETHTOOL_LINK_MODE_MASK_NBITS, + tb[ETHTOOL_A_FEC_MODES], + link_mode_names, info->extack, &mod); + if (ret < 0) + return ret; + ethnl_update_u8(&fec_auto, tb[ETHTOOL_A_FEC_AUTO], &mod); + if (!mod) + return 0; + + ret = ethtool_link_modes_to_fecparam(&fec, fec_link_modes, fec_auto); + if (ret) { + NL_SET_ERR_MSG_ATTR(info->extack, tb[ETHTOOL_A_FEC_MODES], + "invalid FEC modes requested"); + return ret; + } + if (!fec.fec) { + NL_SET_ERR_MSG_ATTR(info->extack, tb[ETHTOOL_A_FEC_MODES], + "no FEC modes set"); + return -EINVAL; + } + + ret = dev->ethtool_ops->set_fecparam(dev, &fec); + return ret < 0 ? 
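+	/* Returning 1 tells the ethnl core that the configuration changed, so
+	 * it emits the ETHTOOL_MSG_FEC_NTF notification configured as
+	 * .set_ntf_cmd below; 0 would mean "no change, no notification".
+	 */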
ret : 1; +} + +const struct ethnl_request_ops ethnl_fec_request_ops = { + .request_cmd = ETHTOOL_MSG_FEC_GET, + .reply_cmd = ETHTOOL_MSG_FEC_GET_REPLY, + .hdr_attr = ETHTOOL_A_FEC_HEADER, + .req_info_size = sizeof(struct fec_req_info), + .reply_data_size = sizeof(struct fec_reply_data), + + .prepare_data = fec_prepare_data, + .reply_size = fec_reply_size, + .fill_reply = fec_fill_reply, + + .set_validate = ethnl_set_fec_validate, + .set = ethnl_set_fec, + .set_ntf_cmd = ETHTOOL_MSG_FEC_NTF, +}; diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c new file mode 100644 index 000000000000..fa83ddade4f8 --- /dev/null +++ b/net/ethtool/ioctl.c @@ -0,0 +1,3853 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * net/core/ethtool.c - Ethtool ioctl handler + * Copyright (c) 2003 Matthew Wilcox <matthew@wil.cx> + * + * This file is where we call all the ethtool_ops commands to get + * the information ethtool needs. + */ + +#include <linux/compat.h> +#include <linux/etherdevice.h> +#include <linux/module.h> +#include <linux/types.h> +#include <linux/capability.h> +#include <linux/errno.h> +#include <linux/ethtool.h> +#include <linux/netdevice.h> +#include <linux/net_tstamp.h> +#include <linux/phy.h> +#include <linux/bitops.h> +#include <linux/uaccess.h> +#include <linux/vmalloc.h> +#include <linux/sfp.h> +#include <linux/slab.h> +#include <linux/rtnetlink.h> +#include <linux/sched/signal.h> +#include <linux/net.h> +#include <linux/pm_runtime.h> +#include <linux/utsname.h> +#include <net/devlink.h> +#include <net/ipv6.h> +#include <net/xdp_sock_drv.h> +#include <net/flow_offload.h> +#include <net/netdev_lock.h> +#include <linux/ethtool_netlink.h> +#include "common.h" + +/* State held across locks and calls for commands which have devlink fallback */ +struct ethtool_devlink_compat { + struct devlink *devlink; + union { + struct ethtool_flash efl; + struct ethtool_drvinfo info; + }; +}; + +static struct devlink *netdev_to_devlink_get(struct net_device *dev) +{ + if (!dev->devlink_port) + return NULL; + return devlink_try_get(dev->devlink_port->devlink); +} + +/* + * Some useful ethtool_ops methods that're device independent. + * If we find that all drivers want to do the same thing here, + * we can turn these into dev_() function calls. + */ + +u32 ethtool_op_get_link(struct net_device *dev) +{ + /* Synchronize carrier state with link watch, see also rtnl_getlink() */ + __linkwatch_sync_dev(dev); + + return netif_carrier_ok(dev) ? 
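+	/* Generic helper for drivers whose link state is fully tracked by the
+	 * netif carrier; such a driver typically just wires it up, roughly
+	 * (driver name below is only a placeholder sketch):
+	 *
+	 *	static const struct ethtool_ops foo_ethtool_ops = {
+	 *		.get_link = ethtool_op_get_link,
+	 *	};
+	 */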
1 : 0; +} +EXPORT_SYMBOL(ethtool_op_get_link); + +int ethtool_op_get_ts_info(struct net_device *dev, + struct kernel_ethtool_ts_info *info) +{ + info->so_timestamping = + SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE; + info->phc_index = -1; + return 0; +} +EXPORT_SYMBOL(ethtool_op_get_ts_info); + +/* Handlers for each ethtool command */ + +static int ethtool_get_features(struct net_device *dev, void __user *useraddr) +{ + struct ethtool_gfeatures cmd = { + .cmd = ETHTOOL_GFEATURES, + .size = ETHTOOL_DEV_FEATURE_WORDS, + }; + struct ethtool_get_features_block features[ETHTOOL_DEV_FEATURE_WORDS]; + u32 __user *sizeaddr; + u32 copy_size; + int i; + + /* in case feature bits run out again */ + BUILD_BUG_ON(ETHTOOL_DEV_FEATURE_WORDS * sizeof(u32) > sizeof(netdev_features_t)); + + for (i = 0; i < ETHTOOL_DEV_FEATURE_WORDS; ++i) { + features[i].available = (u32)(dev->hw_features >> (32 * i)); + features[i].requested = (u32)(dev->wanted_features >> (32 * i)); + features[i].active = (u32)(dev->features >> (32 * i)); + features[i].never_changed = + (u32)(NETIF_F_NEVER_CHANGE >> (32 * i)); + } + + sizeaddr = useraddr + offsetof(struct ethtool_gfeatures, size); + if (get_user(copy_size, sizeaddr)) + return -EFAULT; + + if (copy_size > ETHTOOL_DEV_FEATURE_WORDS) + copy_size = ETHTOOL_DEV_FEATURE_WORDS; + + if (copy_to_user(useraddr, &cmd, sizeof(cmd))) + return -EFAULT; + useraddr += sizeof(cmd); + if (copy_to_user(useraddr, features, + array_size(copy_size, sizeof(*features)))) + return -EFAULT; + + return 0; +} + +static int ethtool_set_features(struct net_device *dev, void __user *useraddr) +{ + struct ethtool_sfeatures cmd; + struct ethtool_set_features_block features[ETHTOOL_DEV_FEATURE_WORDS]; + netdev_features_t wanted = 0, valid = 0; + int i, ret = 0; + + if (copy_from_user(&cmd, useraddr, sizeof(cmd))) + return -EFAULT; + useraddr += sizeof(cmd); + + if (cmd.size != ETHTOOL_DEV_FEATURE_WORDS) + return -EINVAL; + + if (copy_from_user(features, useraddr, sizeof(features))) + return -EFAULT; + + for (i = 0; i < ETHTOOL_DEV_FEATURE_WORDS; ++i) { + valid |= (netdev_features_t)features[i].valid << (32 * i); + wanted |= (netdev_features_t)features[i].requested << (32 * i); + } + + if (valid & ~NETIF_F_ETHTOOL_BITS) + return -EINVAL; + + if (valid & ~dev->hw_features) { + valid &= dev->hw_features; + ret |= ETHTOOL_F_UNSUPPORTED; + } + + dev->wanted_features &= ~valid; + dev->wanted_features |= wanted & valid; + __netdev_update_features(dev); + + if ((dev->wanted_features ^ dev->features) & valid) + ret |= ETHTOOL_F_WISH; + + return ret; +} + +static int __ethtool_get_sset_count(struct net_device *dev, int sset) +{ + const struct ethtool_phy_ops *phy_ops = ethtool_phy_ops; + const struct ethtool_ops *ops = dev->ethtool_ops; + + if (sset == ETH_SS_FEATURES) + return ARRAY_SIZE(netdev_features_strings); + + if (sset == ETH_SS_RSS_HASH_FUNCS) + return ARRAY_SIZE(rss_hash_func_strings); + + if (sset == ETH_SS_TUNABLES) + return ARRAY_SIZE(tunable_strings); + + if (sset == ETH_SS_PHY_TUNABLES) + return ARRAY_SIZE(phy_tunable_strings); + + if (sset == ETH_SS_PHY_STATS && dev->phydev && + !ops->get_ethtool_phy_stats && + phy_ops && phy_ops->get_sset_count) + return phy_ops->get_sset_count(dev->phydev); + + if (sset == ETH_SS_LINK_MODES) + return __ETHTOOL_LINK_MODE_MASK_NBITS; + + if (ops->get_sset_count && ops->get_strings) + return ops->get_sset_count(dev, sset); + else + return -EOPNOTSUPP; +} + +static void __ethtool_get_strings(struct net_device *dev, + u32 
stringset, u8 *data) +{ + const struct ethtool_phy_ops *phy_ops = ethtool_phy_ops; + const struct ethtool_ops *ops = dev->ethtool_ops; + + if (stringset == ETH_SS_FEATURES) + memcpy(data, netdev_features_strings, + sizeof(netdev_features_strings)); + else if (stringset == ETH_SS_RSS_HASH_FUNCS) + memcpy(data, rss_hash_func_strings, + sizeof(rss_hash_func_strings)); + else if (stringset == ETH_SS_TUNABLES) + memcpy(data, tunable_strings, sizeof(tunable_strings)); + else if (stringset == ETH_SS_PHY_TUNABLES) + memcpy(data, phy_tunable_strings, sizeof(phy_tunable_strings)); + else if (stringset == ETH_SS_PHY_STATS && dev->phydev && + !ops->get_ethtool_phy_stats && phy_ops && + phy_ops->get_strings) + phy_ops->get_strings(dev->phydev, data); + else if (stringset == ETH_SS_LINK_MODES) + memcpy(data, link_mode_names, + __ETHTOOL_LINK_MODE_MASK_NBITS * ETH_GSTRING_LEN); + else + /* ops->get_strings is valid because checked earlier */ + ops->get_strings(dev, stringset, data); +} + +static netdev_features_t ethtool_get_feature_mask(u32 eth_cmd) +{ + /* feature masks of legacy discrete ethtool ops */ + + switch (eth_cmd) { + case ETHTOOL_GTXCSUM: + case ETHTOOL_STXCSUM: + return NETIF_F_CSUM_MASK | NETIF_F_FCOE_CRC | + NETIF_F_SCTP_CRC; + case ETHTOOL_GRXCSUM: + case ETHTOOL_SRXCSUM: + return NETIF_F_RXCSUM; + case ETHTOOL_GSG: + case ETHTOOL_SSG: + return NETIF_F_SG | NETIF_F_FRAGLIST; + case ETHTOOL_GTSO: + case ETHTOOL_STSO: + return NETIF_F_ALL_TSO; + case ETHTOOL_GGSO: + case ETHTOOL_SGSO: + return NETIF_F_GSO; + case ETHTOOL_GGRO: + case ETHTOOL_SGRO: + return NETIF_F_GRO; + default: + BUG(); + } +} + +static int ethtool_get_one_feature(struct net_device *dev, + char __user *useraddr, u32 ethcmd) +{ + netdev_features_t mask = ethtool_get_feature_mask(ethcmd); + struct ethtool_value edata = { + .cmd = ethcmd, + .data = !!(dev->features & mask), + }; + + if (copy_to_user(useraddr, &edata, sizeof(edata))) + return -EFAULT; + return 0; +} + +static int ethtool_set_one_feature(struct net_device *dev, + void __user *useraddr, u32 ethcmd) +{ + struct ethtool_value edata; + netdev_features_t mask; + + if (copy_from_user(&edata, useraddr, sizeof(edata))) + return -EFAULT; + + mask = ethtool_get_feature_mask(ethcmd); + mask &= dev->hw_features; + if (!mask) + return -EOPNOTSUPP; + + if (edata.data) + dev->wanted_features |= mask; + else + dev->wanted_features &= ~mask; + + __netdev_update_features(dev); + + return 0; +} + +#define ETH_ALL_FLAGS (ETH_FLAG_LRO | ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN | \ + ETH_FLAG_NTUPLE | ETH_FLAG_RXHASH) +#define ETH_ALL_FEATURES (NETIF_F_LRO | NETIF_F_HW_VLAN_CTAG_RX | \ + NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_NTUPLE | \ + NETIF_F_RXHASH) + +static u32 __ethtool_get_flags(struct net_device *dev) +{ + u32 flags = 0; + + if (dev->features & NETIF_F_LRO) + flags |= ETH_FLAG_LRO; + if (dev->features & NETIF_F_HW_VLAN_CTAG_RX) + flags |= ETH_FLAG_RXVLAN; + if (dev->features & NETIF_F_HW_VLAN_CTAG_TX) + flags |= ETH_FLAG_TXVLAN; + if (dev->features & NETIF_F_NTUPLE) + flags |= ETH_FLAG_NTUPLE; + if (dev->features & NETIF_F_RXHASH) + flags |= ETH_FLAG_RXHASH; + + return flags; +} + +static int __ethtool_set_flags(struct net_device *dev, u32 data) +{ + netdev_features_t features = 0, changed; + + if (data & ~ETH_ALL_FLAGS) + return -EINVAL; + + if (data & ETH_FLAG_LRO) + features |= NETIF_F_LRO; + if (data & ETH_FLAG_RXVLAN) + features |= NETIF_F_HW_VLAN_CTAG_RX; + if (data & ETH_FLAG_TXVLAN) + features |= NETIF_F_HW_VLAN_CTAG_TX; + if (data & ETH_FLAG_NTUPLE) + features |= 
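+	/* Legacy ETHTOOL_SFLAGS path: each ETH_FLAG_* bit is translated to its
+	 * NETIF_F_* counterpart and applied through the usual wanted_features /
+	 * __netdev_update_features() machinery; bits the device cannot toggle
+	 * (not in hw_features) are rejected below.
+	 */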
NETIF_F_NTUPLE; + if (data & ETH_FLAG_RXHASH) + features |= NETIF_F_RXHASH; + + /* allow changing only bits set in hw_features */ + changed = (features ^ dev->features) & ETH_ALL_FEATURES; + if (changed & ~dev->hw_features) + return (changed & dev->hw_features) ? -EINVAL : -EOPNOTSUPP; + + dev->wanted_features = + (dev->wanted_features & ~changed) | (features & changed); + + __netdev_update_features(dev); + + return 0; +} + +/* Given two link masks, AND them together and save the result in dst. */ +void ethtool_intersect_link_masks(struct ethtool_link_ksettings *dst, + struct ethtool_link_ksettings *src) +{ + unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS); + unsigned int idx = 0; + + for (; idx < size; idx++) { + dst->link_modes.supported[idx] &= + src->link_modes.supported[idx]; + dst->link_modes.advertising[idx] &= + src->link_modes.advertising[idx]; + } +} +EXPORT_SYMBOL(ethtool_intersect_link_masks); + +void ethtool_convert_legacy_u32_to_link_mode(unsigned long *dst, + u32 legacy_u32) +{ + linkmode_zero(dst); + dst[0] = legacy_u32; +} +EXPORT_SYMBOL(ethtool_convert_legacy_u32_to_link_mode); + +/* return false if src had higher bits set. lower bits always updated. */ +bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32, + const unsigned long *src) +{ + *legacy_u32 = src[0]; + return find_next_bit(src, __ETHTOOL_LINK_MODE_MASK_NBITS, 32) == + __ETHTOOL_LINK_MODE_MASK_NBITS; +} +EXPORT_SYMBOL(ethtool_convert_link_mode_to_legacy_u32); + +/* return false if ksettings link modes had higher bits + * set. legacy_settings always updated (best effort) + */ +static bool +convert_link_ksettings_to_legacy_settings( + struct ethtool_cmd *legacy_settings, + const struct ethtool_link_ksettings *link_ksettings) +{ + bool retval = true; + + memset(legacy_settings, 0, sizeof(*legacy_settings)); + /* this also clears the deprecated fields in legacy structure: + * __u8 transceiver; + * __u32 maxtxpkt; + * __u32 maxrxpkt; + */ + + retval &= ethtool_convert_link_mode_to_legacy_u32( + &legacy_settings->supported, + link_ksettings->link_modes.supported); + retval &= ethtool_convert_link_mode_to_legacy_u32( + &legacy_settings->advertising, + link_ksettings->link_modes.advertising); + retval &= ethtool_convert_link_mode_to_legacy_u32( + &legacy_settings->lp_advertising, + link_ksettings->link_modes.lp_advertising); + ethtool_cmd_speed_set(legacy_settings, link_ksettings->base.speed); + legacy_settings->duplex + = link_ksettings->base.duplex; + legacy_settings->port + = link_ksettings->base.port; + legacy_settings->phy_address + = link_ksettings->base.phy_address; + legacy_settings->autoneg + = link_ksettings->base.autoneg; + legacy_settings->mdio_support + = link_ksettings->base.mdio_support; + legacy_settings->eth_tp_mdix + = link_ksettings->base.eth_tp_mdix; + legacy_settings->eth_tp_mdix_ctrl + = link_ksettings->base.eth_tp_mdix_ctrl; + legacy_settings->transceiver + = link_ksettings->base.transceiver; + return retval; +} + +/* number of 32-bit words to store the user's link mode bitmaps */ +#define __ETHTOOL_LINK_MODE_MASK_NU32 \ + DIV_ROUND_UP(__ETHTOOL_LINK_MODE_MASK_NBITS, 32) + +/* layout of the struct passed from/to userland */ +struct ethtool_link_usettings { + struct ethtool_link_settings base; + struct { + __u32 supported[__ETHTOOL_LINK_MODE_MASK_NU32]; + __u32 advertising[__ETHTOOL_LINK_MODE_MASK_NU32]; + __u32 lp_advertising[__ETHTOOL_LINK_MODE_MASK_NU32]; + } link_modes; +}; + +/* Internal kernel helper to query a device ethtool_link_settings. 
*/ +int __ethtool_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *link_ksettings) +{ + ASSERT_RTNL(); + + if (!dev->ethtool_ops->get_link_ksettings) + return -EOPNOTSUPP; + + if (!netif_device_present(dev)) + return -ENODEV; + + memset(link_ksettings, 0, sizeof(*link_ksettings)); + return dev->ethtool_ops->get_link_ksettings(dev, link_ksettings); +} +EXPORT_SYMBOL(__ethtool_get_link_ksettings); + +/* convert ethtool_link_usettings in user space to a kernel internal + * ethtool_link_ksettings. return 0 on success, errno on error. + */ +static int load_link_ksettings_from_user(struct ethtool_link_ksettings *to, + const void __user *from) +{ + struct ethtool_link_usettings link_usettings; + + if (copy_from_user(&link_usettings, from, sizeof(link_usettings))) + return -EFAULT; + + memcpy(&to->base, &link_usettings.base, sizeof(to->base)); + bitmap_from_arr32(to->link_modes.supported, + link_usettings.link_modes.supported, + __ETHTOOL_LINK_MODE_MASK_NBITS); + bitmap_from_arr32(to->link_modes.advertising, + link_usettings.link_modes.advertising, + __ETHTOOL_LINK_MODE_MASK_NBITS); + bitmap_from_arr32(to->link_modes.lp_advertising, + link_usettings.link_modes.lp_advertising, + __ETHTOOL_LINK_MODE_MASK_NBITS); + + return 0; +} + +/* Check if the user is trying to change anything besides speed/duplex */ +bool ethtool_virtdev_validate_cmd(const struct ethtool_link_ksettings *cmd) +{ + struct ethtool_link_settings base2 = {}; + + base2.speed = cmd->base.speed; + base2.port = PORT_OTHER; + base2.duplex = cmd->base.duplex; + base2.cmd = cmd->base.cmd; + base2.link_mode_masks_nwords = cmd->base.link_mode_masks_nwords; + + return !memcmp(&base2, &cmd->base, sizeof(base2)) && + bitmap_empty(cmd->link_modes.supported, + __ETHTOOL_LINK_MODE_MASK_NBITS) && + bitmap_empty(cmd->link_modes.lp_advertising, + __ETHTOOL_LINK_MODE_MASK_NBITS); +} + +/* convert a kernel internal ethtool_link_ksettings to + * ethtool_link_usettings in user space. return 0 on success, errno on + * error. + */ +static int +store_link_ksettings_for_user(void __user *to, + const struct ethtool_link_ksettings *from) +{ + struct ethtool_link_usettings link_usettings; + + memcpy(&link_usettings, from, sizeof(link_usettings)); + bitmap_to_arr32(link_usettings.link_modes.supported, + from->link_modes.supported, + __ETHTOOL_LINK_MODE_MASK_NBITS); + bitmap_to_arr32(link_usettings.link_modes.advertising, + from->link_modes.advertising, + __ETHTOOL_LINK_MODE_MASK_NBITS); + bitmap_to_arr32(link_usettings.link_modes.lp_advertising, + from->link_modes.lp_advertising, + __ETHTOOL_LINK_MODE_MASK_NBITS); + + if (copy_to_user(to, &link_usettings, sizeof(link_usettings))) + return -EFAULT; + + return 0; +} + +/* Query device for its ethtool_link_settings. 
*/ +static int ethtool_get_link_ksettings(struct net_device *dev, + void __user *useraddr) +{ + int err = 0; + struct ethtool_link_ksettings link_ksettings; + + ASSERT_RTNL(); + if (!dev->ethtool_ops->get_link_ksettings) + return -EOPNOTSUPP; + + /* handle bitmap nbits handshake */ + if (copy_from_user(&link_ksettings.base, useraddr, + sizeof(link_ksettings.base))) + return -EFAULT; + + if (__ETHTOOL_LINK_MODE_MASK_NU32 + != link_ksettings.base.link_mode_masks_nwords) { + /* wrong link mode nbits requested */ + memset(&link_ksettings, 0, sizeof(link_ksettings)); + link_ksettings.base.cmd = ETHTOOL_GLINKSETTINGS; + /* send back number of words required as negative val */ + compiletime_assert(__ETHTOOL_LINK_MODE_MASK_NU32 <= S8_MAX, + "need too many bits for link modes!"); + link_ksettings.base.link_mode_masks_nwords + = -((s8)__ETHTOOL_LINK_MODE_MASK_NU32); + + /* copy the base fields back to user, not the link + * mode bitmaps + */ + if (copy_to_user(useraddr, &link_ksettings.base, + sizeof(link_ksettings.base))) + return -EFAULT; + + return 0; + } + + /* handshake successful: user/kernel agree on + * link_mode_masks_nwords + */ + + memset(&link_ksettings, 0, sizeof(link_ksettings)); + err = dev->ethtool_ops->get_link_ksettings(dev, &link_ksettings); + if (err < 0) + return err; + + /* make sure we tell the right values to user */ + link_ksettings.base.cmd = ETHTOOL_GLINKSETTINGS; + link_ksettings.base.link_mode_masks_nwords + = __ETHTOOL_LINK_MODE_MASK_NU32; + link_ksettings.base.master_slave_cfg = MASTER_SLAVE_CFG_UNSUPPORTED; + link_ksettings.base.master_slave_state = MASTER_SLAVE_STATE_UNSUPPORTED; + link_ksettings.base.rate_matching = RATE_MATCH_NONE; + + return store_link_ksettings_for_user(useraddr, &link_ksettings); +} + +/* Update device ethtool_link_settings. */ +static int ethtool_set_link_ksettings(struct net_device *dev, + void __user *useraddr) +{ + struct ethtool_link_ksettings link_ksettings = {}; + int err; + + ASSERT_RTNL(); + + if (!dev->ethtool_ops->set_link_ksettings) + return -EOPNOTSUPP; + + /* make sure nbits field has expected value */ + if (copy_from_user(&link_ksettings.base, useraddr, + sizeof(link_ksettings.base))) + return -EFAULT; + + if (__ETHTOOL_LINK_MODE_MASK_NU32 + != link_ksettings.base.link_mode_masks_nwords) + return -EINVAL; + + /* copy the whole structure, now that we know it has expected + * format + */ + err = load_link_ksettings_from_user(&link_ksettings, useraddr); + if (err) + return err; + + /* re-check nwords field, just in case */ + if (__ETHTOOL_LINK_MODE_MASK_NU32 + != link_ksettings.base.link_mode_masks_nwords) + return -EINVAL; + + if (link_ksettings.base.master_slave_cfg || + link_ksettings.base.master_slave_state) + return -EINVAL; + + err = dev->ethtool_ops->set_link_ksettings(dev, &link_ksettings); + if (err >= 0) { + ethtool_notify(dev, ETHTOOL_MSG_LINKINFO_NTF); + ethtool_notify(dev, ETHTOOL_MSG_LINKMODES_NTF); + } + return err; +} + +int ethtool_virtdev_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd, + u32 *dev_speed, u8 *dev_duplex) +{ + u32 speed; + u8 duplex; + + speed = cmd->base.speed; + duplex = cmd->base.duplex; + /* don't allow custom speed and duplex */ + if (!ethtool_validate_speed(speed) || + !ethtool_validate_duplex(duplex) || + !ethtool_virtdev_validate_cmd(cmd)) + return -EINVAL; + *dev_speed = speed; + *dev_duplex = duplex; + + return 0; +} +EXPORT_SYMBOL(ethtool_virtdev_set_link_ksettings); + +/* Query device for its ethtool_cmd settings. 
+ * + * Backward compatibility note: for compatibility with legacy ethtool, this is + * now implemented via get_link_ksettings. When driver reports higher link mode + * bits, a kernel warning is logged once (with name of 1st driver/device) to + * recommend user to upgrade ethtool, but the command is successful (only the + * lower link mode bits reported back to user). Deprecated fields from + * ethtool_cmd (transceiver/maxrxpkt/maxtxpkt) are always set to zero. + */ +static int ethtool_get_settings(struct net_device *dev, void __user *useraddr) +{ + struct ethtool_link_ksettings link_ksettings; + struct ethtool_cmd cmd; + int err; + + ASSERT_RTNL(); + if (!dev->ethtool_ops->get_link_ksettings) + return -EOPNOTSUPP; + + if (dev->ethtool->module_fw_flash_in_progress) + return -EBUSY; + + memset(&link_ksettings, 0, sizeof(link_ksettings)); + err = dev->ethtool_ops->get_link_ksettings(dev, &link_ksettings); + if (err < 0) + return err; + convert_link_ksettings_to_legacy_settings(&cmd, &link_ksettings); + + /* send a sensible cmd tag back to user */ + cmd.cmd = ETHTOOL_GSET; + + if (copy_to_user(useraddr, &cmd, sizeof(cmd))) + return -EFAULT; + + return 0; +} + +/* Update device link settings with given ethtool_cmd. + * + * Backward compatibility note: for compatibility with legacy ethtool, this is + * now always implemented via set_link_settings. When user's request updates + * deprecated ethtool_cmd fields (transceiver/maxrxpkt/maxtxpkt), a kernel + * warning is logged once (with name of 1st driver/device) to recommend user to + * upgrade ethtool, and the request is rejected. + */ +static int ethtool_set_settings(struct net_device *dev, void __user *useraddr) +{ + struct ethtool_link_ksettings link_ksettings; + struct ethtool_cmd cmd; + int ret; + + ASSERT_RTNL(); + + if (copy_from_user(&cmd, useraddr, sizeof(cmd))) + return -EFAULT; + if (!dev->ethtool_ops->set_link_ksettings) + return -EOPNOTSUPP; + + if (!convert_legacy_settings_to_link_ksettings(&link_ksettings, &cmd)) + return -EINVAL; + link_ksettings.base.link_mode_masks_nwords = + __ETHTOOL_LINK_MODE_MASK_NU32; + ret = dev->ethtool_ops->set_link_ksettings(dev, &link_ksettings); + if (ret >= 0) { + ethtool_notify(dev, ETHTOOL_MSG_LINKINFO_NTF); + ethtool_notify(dev, ETHTOOL_MSG_LINKMODES_NTF); + } + return ret; +} + +static int +ethtool_get_drvinfo(struct net_device *dev, struct ethtool_devlink_compat *rsp) +{ + const struct ethtool_ops *ops = dev->ethtool_ops; + struct device *parent = dev->dev.parent; + + rsp->info.cmd = ETHTOOL_GDRVINFO; + strscpy(rsp->info.version, init_uts_ns.name.release, + sizeof(rsp->info.version)); + if (ops->get_drvinfo) { + ops->get_drvinfo(dev, &rsp->info); + if (!rsp->info.bus_info[0] && parent) + strscpy(rsp->info.bus_info, dev_name(parent), + sizeof(rsp->info.bus_info)); + if (!rsp->info.driver[0] && parent && parent->driver) + strscpy(rsp->info.driver, parent->driver->name, + sizeof(rsp->info.driver)); + } else if (parent && parent->driver) { + strscpy(rsp->info.bus_info, dev_name(parent), + sizeof(rsp->info.bus_info)); + strscpy(rsp->info.driver, parent->driver->name, + sizeof(rsp->info.driver)); + } else if (dev->rtnl_link_ops) { + strscpy(rsp->info.driver, dev->rtnl_link_ops->kind, + sizeof(rsp->info.driver)); + } else { + return -EOPNOTSUPP; + } + + /* + * this method of obtaining string set info is deprecated; + * Use ETHTOOL_GSSET_INFO instead. 
+ */ + if (ops->get_sset_count) { + int rc; + + rc = ops->get_sset_count(dev, ETH_SS_TEST); + if (rc >= 0) + rsp->info.testinfo_len = rc; + rc = ops->get_sset_count(dev, ETH_SS_STATS); + if (rc >= 0) + rsp->info.n_stats = rc; + rc = ops->get_sset_count(dev, ETH_SS_PRIV_FLAGS); + if (rc >= 0) + rsp->info.n_priv_flags = rc; + } + if (ops->get_regs_len) { + int ret = ops->get_regs_len(dev); + + if (ret > 0) + rsp->info.regdump_len = ret; + } + + if (ops->get_eeprom_len) + rsp->info.eedump_len = ops->get_eeprom_len(dev); + + if (!rsp->info.fw_version[0]) + rsp->devlink = netdev_to_devlink_get(dev); + + return 0; +} + +static noinline_for_stack int ethtool_get_sset_info(struct net_device *dev, + void __user *useraddr) +{ + struct ethtool_sset_info info; + u64 sset_mask; + int i, idx = 0, n_bits = 0, ret, rc; + u32 *info_buf = NULL; + + if (copy_from_user(&info, useraddr, sizeof(info))) + return -EFAULT; + + /* store copy of mask, because we zero struct later on */ + sset_mask = info.sset_mask; + if (!sset_mask) + return 0; + + /* calculate size of return buffer */ + n_bits = hweight64(sset_mask); + + memset(&info, 0, sizeof(info)); + info.cmd = ETHTOOL_GSSET_INFO; + + info_buf = kcalloc(n_bits, sizeof(u32), GFP_USER); + if (!info_buf) + return -ENOMEM; + + /* + * fill return buffer based on input bitmask and successful + * get_sset_count return + */ + for (i = 0; i < 64; i++) { + if (!(sset_mask & (1ULL << i))) + continue; + + rc = __ethtool_get_sset_count(dev, i); + if (rc >= 0) { + info.sset_mask |= (1ULL << i); + info_buf[idx++] = rc; + } + } + + ret = -EFAULT; + if (copy_to_user(useraddr, &info, sizeof(info))) + goto out; + + useraddr += offsetof(struct ethtool_sset_info, data); + if (copy_to_user(useraddr, info_buf, array_size(idx, sizeof(u32)))) + goto out; + + ret = 0; + +out: + kfree(info_buf); + return ret; +} + +static noinline_for_stack int +ethtool_rxnfc_copy_from_compat(struct ethtool_rxnfc *rxnfc, + const struct compat_ethtool_rxnfc __user *useraddr, + size_t size) +{ + struct compat_ethtool_rxnfc crxnfc = {}; + + /* We expect there to be holes between fs.m_ext and + * fs.ring_cookie and at the end of fs, but nowhere else. + * On non-x86, no conversion should be needed. 
+ */ + BUILD_BUG_ON(!IS_ENABLED(CONFIG_X86_64) && + sizeof(struct compat_ethtool_rxnfc) != + sizeof(struct ethtool_rxnfc)); + BUILD_BUG_ON(offsetof(struct compat_ethtool_rxnfc, fs.m_ext) + + sizeof(useraddr->fs.m_ext) != + offsetof(struct ethtool_rxnfc, fs.m_ext) + + sizeof(rxnfc->fs.m_ext)); + BUILD_BUG_ON(offsetof(struct compat_ethtool_rxnfc, fs.location) - + offsetof(struct compat_ethtool_rxnfc, fs.ring_cookie) != + offsetof(struct ethtool_rxnfc, fs.location) - + offsetof(struct ethtool_rxnfc, fs.ring_cookie)); + + if (copy_from_user(&crxnfc, useraddr, min(size, sizeof(crxnfc)))) + return -EFAULT; + + *rxnfc = (struct ethtool_rxnfc) { + .cmd = crxnfc.cmd, + .flow_type = crxnfc.flow_type, + .data = crxnfc.data, + .fs = { + .flow_type = crxnfc.fs.flow_type, + .h_u = crxnfc.fs.h_u, + .h_ext = crxnfc.fs.h_ext, + .m_u = crxnfc.fs.m_u, + .m_ext = crxnfc.fs.m_ext, + .ring_cookie = crxnfc.fs.ring_cookie, + .location = crxnfc.fs.location, + }, + .rule_cnt = crxnfc.rule_cnt, + }; + + return 0; +} + +static int ethtool_rxnfc_copy_from_user(struct ethtool_rxnfc *rxnfc, + const void __user *useraddr, + size_t size) +{ + if (compat_need_64bit_alignment_fixup()) + return ethtool_rxnfc_copy_from_compat(rxnfc, useraddr, size); + + if (copy_from_user(rxnfc, useraddr, size)) + return -EFAULT; + + return 0; +} + +static int ethtool_rxnfc_copy_to_compat(void __user *useraddr, + const struct ethtool_rxnfc *rxnfc, + size_t size, const u32 *rule_buf) +{ + struct compat_ethtool_rxnfc crxnfc; + + memset(&crxnfc, 0, sizeof(crxnfc)); + crxnfc = (struct compat_ethtool_rxnfc) { + .cmd = rxnfc->cmd, + .flow_type = rxnfc->flow_type, + .data = rxnfc->data, + .fs = { + .flow_type = rxnfc->fs.flow_type, + .h_u = rxnfc->fs.h_u, + .h_ext = rxnfc->fs.h_ext, + .m_u = rxnfc->fs.m_u, + .m_ext = rxnfc->fs.m_ext, + .ring_cookie = rxnfc->fs.ring_cookie, + .location = rxnfc->fs.location, + }, + .rule_cnt = rxnfc->rule_cnt, + }; + + if (copy_to_user(useraddr, &crxnfc, min(size, sizeof(crxnfc)))) + return -EFAULT; + + return 0; +} + +static int ethtool_rxnfc_copy_struct(u32 cmd, struct ethtool_rxnfc *info, + size_t *info_size, void __user *useraddr) +{ + /* struct ethtool_rxnfc was originally defined for + * ETHTOOL_{G,S}RXFH with only the cmd, flow_type and data + * members. User-space might still be using that + * definition. + */ + if (cmd == ETHTOOL_GRXFH || cmd == ETHTOOL_SRXFH) + *info_size = (offsetof(struct ethtool_rxnfc, data) + + sizeof(info->data)); + + if (ethtool_rxnfc_copy_from_user(info, useraddr, *info_size)) + return -EFAULT; + + if ((cmd == ETHTOOL_GRXFH || cmd == ETHTOOL_SRXFH) && info->flow_type & FLOW_RSS) { + *info_size = sizeof(*info); + if (ethtool_rxnfc_copy_from_user(info, useraddr, *info_size)) + return -EFAULT; + /* Since malicious users may modify the original data, + * we need to check whether FLOW_RSS is still requested. 
+ */ + if (!(info->flow_type & FLOW_RSS)) + return -EINVAL; + } + + if (info->cmd != cmd) + return -EINVAL; + + return 0; +} + +static int ethtool_rxnfc_copy_to_user(void __user *useraddr, + const struct ethtool_rxnfc *rxnfc, + size_t size, const u32 *rule_buf) +{ + int ret; + + if (compat_need_64bit_alignment_fixup()) { + ret = ethtool_rxnfc_copy_to_compat(useraddr, rxnfc, size, + rule_buf); + useraddr += offsetof(struct compat_ethtool_rxnfc, rule_locs); + } else { + ret = copy_to_user(useraddr, rxnfc, size); + useraddr += offsetof(struct ethtool_rxnfc, rule_locs); + } + + if (ret) + return -EFAULT; + + if (rule_buf) { + if (copy_to_user(useraddr, rule_buf, + rxnfc->rule_cnt * sizeof(u32))) + return -EFAULT; + } + + return 0; +} + +static bool flow_type_hashable(u32 flow_type) +{ + switch (flow_type) { + case ETHER_FLOW: + case TCP_V4_FLOW: + case UDP_V4_FLOW: + case SCTP_V4_FLOW: + case AH_ESP_V4_FLOW: + case TCP_V6_FLOW: + case UDP_V6_FLOW: + case SCTP_V6_FLOW: + case AH_ESP_V6_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case IPV4_FLOW: + case IPV6_FLOW: + case GTPU_V4_FLOW: + case GTPU_V6_FLOW: + case GTPC_V4_FLOW: + case GTPC_V6_FLOW: + case GTPC_TEID_V4_FLOW: + case GTPC_TEID_V6_FLOW: + case GTPU_EH_V4_FLOW: + case GTPU_EH_V6_FLOW: + case GTPU_UL_V4_FLOW: + case GTPU_UL_V6_FLOW: + case GTPU_DL_V4_FLOW: + case GTPU_DL_V6_FLOW: + return true; + } + + return false; +} + +static bool flow_type_v6(u32 flow_type) +{ + switch (flow_type) { + case TCP_V6_FLOW: + case UDP_V6_FLOW: + case SCTP_V6_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case IPV6_FLOW: + case GTPU_V6_FLOW: + case GTPC_V6_FLOW: + case GTPC_TEID_V6_FLOW: + case GTPU_EH_V6_FLOW: + case GTPU_UL_V6_FLOW: + case GTPU_DL_V6_FLOW: + return true; + } + + return false; +} + +/* When adding a new type, update the assert and, if it's hashable, add it to + * the flow_type_hashable switch case. 
+ */ +static_assert(GTPU_DL_V6_FLOW + 1 == __FLOW_TYPE_COUNT); + +static int ethtool_check_xfrm_rxfh(u32 input_xfrm, u64 rxfh) +{ + /* Sanity check: if symmetric-xor/symmetric-or-xor is set, then: + * 1 - no other fields besides IP src/dst and/or L4 src/dst are set + * 2 - If src is set, dst must also be set + */ + if ((input_xfrm != RXH_XFRM_NO_CHANGE && + input_xfrm & (RXH_XFRM_SYM_XOR | RXH_XFRM_SYM_OR_XOR)) && + !ethtool_rxfh_config_is_sym(rxfh)) + return -EINVAL; + + return 0; +} + +static int ethtool_check_flow_types(struct net_device *dev, u32 input_xfrm) +{ + const struct ethtool_ops *ops = dev->ethtool_ops; + int err; + u32 i; + + if (!input_xfrm || input_xfrm == RXH_XFRM_NO_CHANGE) + return 0; + + for (i = 0; i < __FLOW_TYPE_COUNT; i++) { + struct ethtool_rxfh_fields fields = { + .flow_type = i, + }; + + if (!flow_type_hashable(i)) + continue; + + if (ops->get_rxfh_fields(dev, &fields)) + continue; + + err = ethtool_check_xfrm_rxfh(input_xfrm, fields.data); + if (err) + return err; + } + + return 0; +} + +static noinline_for_stack int +ethtool_set_rxfh_fields(struct net_device *dev, u32 cmd, void __user *useraddr) +{ + const struct ethtool_ops *ops = dev->ethtool_ops; + struct ethtool_rxfh_fields fields = {}; + struct ethtool_rxnfc info; + size_t info_size = sizeof(info); + int rc; + + if (!ops->set_rxfh_fields) + return -EOPNOTSUPP; + + rc = ethtool_rxnfc_copy_struct(cmd, &info, &info_size, useraddr); + if (rc) + return rc; + + if (info.data & RXH_IP6_FL && !flow_type_v6(info.flow_type)) + return -EINVAL; + + if (info.flow_type & FLOW_RSS && info.rss_context && + !ops->rxfh_per_ctx_fields) + return -EINVAL; + + mutex_lock(&dev->ethtool->rss_lock); + if (ops->get_rxfh) { + struct ethtool_rxfh_param rxfh = {}; + + rc = ops->get_rxfh(dev, &rxfh); + if (rc) + goto exit_unlock; + + rc = ethtool_check_xfrm_rxfh(rxfh.input_xfrm, info.data); + if (rc) + goto exit_unlock; + } + + fields.data = info.data; + fields.flow_type = info.flow_type & ~FLOW_RSS; + if (info.flow_type & FLOW_RSS) + fields.rss_context = info.rss_context; + + rc = ops->set_rxfh_fields(dev, &fields, NULL); +exit_unlock: + mutex_unlock(&dev->ethtool->rss_lock); + if (rc) + return rc; + + ethtool_rss_notify(dev, ETHTOOL_MSG_RSS_NTF, fields.rss_context); + return 0; +} + +static noinline_for_stack int +ethtool_get_rxfh_fields(struct net_device *dev, u32 cmd, void __user *useraddr) +{ + struct ethtool_rxnfc info; + size_t info_size = sizeof(info); + const struct ethtool_ops *ops = dev->ethtool_ops; + struct ethtool_rxfh_fields fields = {}; + int ret; + + if (!ops->get_rxfh_fields) + return -EOPNOTSUPP; + + ret = ethtool_rxnfc_copy_struct(cmd, &info, &info_size, useraddr); + if (ret) + return ret; + + if (info.flow_type & FLOW_RSS && info.rss_context && + !ops->rxfh_per_ctx_fields) + return -EINVAL; + + fields.flow_type = info.flow_type & ~FLOW_RSS; + if (info.flow_type & FLOW_RSS) + fields.rss_context = info.rss_context; + + mutex_lock(&dev->ethtool->rss_lock); + ret = ops->get_rxfh_fields(dev, &fields); + mutex_unlock(&dev->ethtool->rss_lock); + if (ret < 0) + return ret; + + info.data = fields.data; + + return ethtool_rxnfc_copy_to_user(useraddr, &info, info_size, NULL); +} + +static noinline_for_stack int ethtool_set_rxnfc(struct net_device *dev, + u32 cmd, void __user *useraddr) +{ + const struct ethtool_ops *ops = dev->ethtool_ops; + struct ethtool_rxnfc info; + size_t info_size = sizeof(info); + int rc; + + if (!ops->set_rxnfc) + return -EOPNOTSUPP; + + rc = ethtool_rxnfc_copy_struct(cmd, &info, &info_size, 
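+	/* For rule inserts targeting an RSS context (FLOW_RSS) the checks
+	 * below apply: a nonzero ring in the cookie is only valid if the NIC
+	 * can add it to the RSS result (cap_rss_rxnfc_adds), and the context
+	 * must already exist in the rss_ctx xarray.
+	 */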
useraddr); + if (rc) + return rc; + + if (cmd == ETHTOOL_SRXCLSRLINS && info.fs.flow_type & FLOW_RSS) { + /* Nonzero ring with RSS only makes sense + * if NIC adds them together + */ + if (!ops->cap_rss_rxnfc_adds && + ethtool_get_flow_spec_ring(info.fs.ring_cookie)) + return -EINVAL; + + if (info.rss_context && + !xa_load(&dev->ethtool->rss_ctx, info.rss_context)) + return -EINVAL; + } + + rc = ops->set_rxnfc(dev, &info); + if (rc) + return rc; + + if (cmd == ETHTOOL_SRXCLSRLINS && + ethtool_rxnfc_copy_to_user(useraddr, &info, info_size, NULL)) + return -EFAULT; + + return 0; +} + +static noinline_for_stack int ethtool_get_rxrings(struct net_device *dev, + u32 cmd, + void __user *useraddr) +{ + struct ethtool_rxnfc info; + size_t info_size; + int ret; + + info_size = sizeof(info); + ret = ethtool_rxnfc_copy_struct(cmd, &info, &info_size, useraddr); + if (ret) + return ret; + + ret = ethtool_get_rx_ring_count(dev); + if (ret < 0) + return ret; + + info.data = ret; + + return ethtool_rxnfc_copy_to_user(useraddr, &info, info_size, NULL); +} + +static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev, + u32 cmd, void __user *useraddr) +{ + const struct ethtool_ops *ops = dev->ethtool_ops; + struct ethtool_rxnfc info; + void *rule_buf = NULL; + size_t info_size; + int ret; + + if (!ops->get_rxnfc) + return -EOPNOTSUPP; + + info_size = sizeof(info); + ret = ethtool_rxnfc_copy_struct(cmd, &info, &info_size, useraddr); + if (ret) + return ret; + + if (info.cmd == ETHTOOL_GRXCLSRLALL) { + if (info.rule_cnt > 0) { + if (info.rule_cnt <= KMALLOC_MAX_SIZE / sizeof(u32)) + rule_buf = kcalloc(info.rule_cnt, sizeof(u32), + GFP_USER); + if (!rule_buf) + return -ENOMEM; + } + } + + ret = ops->get_rxnfc(dev, &info, rule_buf); + if (ret < 0) + goto err_out; + + ret = ethtool_rxnfc_copy_to_user(useraddr, &info, info_size, rule_buf); +err_out: + kfree(rule_buf); + + return ret; +} + +static int ethtool_copy_validate_indir(u32 *indir, void __user *useraddr, + int num_rx_rings, + u32 size) +{ + int i; + + if (copy_from_user(indir, useraddr, array_size(size, sizeof(indir[0])))) + return -EFAULT; + + /* Validate ring indices */ + for (i = 0; i < size; i++) + if (indir[i] >= num_rx_rings) + return -EINVAL; + + return 0; +} + +u8 netdev_rss_key[NETDEV_RSS_KEY_LEN] __read_mostly; + +void netdev_rss_key_fill(void *buffer, size_t len) +{ + BUG_ON(len > sizeof(netdev_rss_key)); + net_get_random_once(netdev_rss_key, sizeof(netdev_rss_key)); + memcpy(buffer, netdev_rss_key, len); +} +EXPORT_SYMBOL(netdev_rss_key_fill); + +static noinline_for_stack int ethtool_get_rxfh_indir(struct net_device *dev, + void __user *useraddr) +{ + struct ethtool_rxfh_param rxfh = {}; + u32 user_size; + int ret; + + if (!dev->ethtool_ops->get_rxfh_indir_size || + !dev->ethtool_ops->get_rxfh) + return -EOPNOTSUPP; + rxfh.indir_size = dev->ethtool_ops->get_rxfh_indir_size(dev); + if (rxfh.indir_size == 0) + return -EOPNOTSUPP; + + if (copy_from_user(&user_size, + useraddr + offsetof(struct ethtool_rxfh_indir, size), + sizeof(user_size))) + return -EFAULT; + + if (copy_to_user(useraddr + offsetof(struct ethtool_rxfh_indir, size), + &rxfh.indir_size, sizeof(rxfh.indir_size))) + return -EFAULT; + + /* If the user buffer size is 0, this is just a query for the + * device table size. Otherwise, if it's smaller than the + * device table size it's an error. + */ + if (user_size < rxfh.indir_size) + return user_size == 0 ? 
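+	/* A zero-sized buffer is the conventional size probe (indir_size was
+	 * already copied back above). A rough userspace sketch of the
+	 * two-step usage, details such as the ifreq setup elided:
+	 *
+	 *	struct ethtool_rxfh_indir probe = { .cmd = ETHTOOL_GRXFHINDIR };
+	 *	// first SIOCETHTOOL call fills probe.size; a second call with
+	 *	// a probe.size-entry ring_index[] buffer reads the table
+	 */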
0 : -EINVAL; + + rxfh.indir = kcalloc(rxfh.indir_size, sizeof(rxfh.indir[0]), GFP_USER); + if (!rxfh.indir) + return -ENOMEM; + + mutex_lock(&dev->ethtool->rss_lock); + ret = dev->ethtool_ops->get_rxfh(dev, &rxfh); + mutex_unlock(&dev->ethtool->rss_lock); + if (ret) + goto out; + if (copy_to_user(useraddr + + offsetof(struct ethtool_rxfh_indir, ring_index[0]), + rxfh.indir, rxfh.indir_size * sizeof(*rxfh.indir))) + ret = -EFAULT; + +out: + kfree(rxfh.indir); + return ret; +} + +static noinline_for_stack int ethtool_set_rxfh_indir(struct net_device *dev, + void __user *useraddr) +{ + const struct ethtool_ops *ops = dev->ethtool_ops; + struct ethtool_rxfh_param rxfh_dev = {}; + struct netlink_ext_ack *extack = NULL; + int num_rx_rings; + u32 user_size, i; + int ret; + u32 ringidx_offset = offsetof(struct ethtool_rxfh_indir, ring_index[0]); + + if (!ops->get_rxfh_indir_size || !ops->set_rxfh) + return -EOPNOTSUPP; + + rxfh_dev.indir_size = ops->get_rxfh_indir_size(dev); + if (rxfh_dev.indir_size == 0) + return -EOPNOTSUPP; + + if (copy_from_user(&user_size, + useraddr + offsetof(struct ethtool_rxfh_indir, size), + sizeof(user_size))) + return -EFAULT; + + if (user_size != 0 && user_size != rxfh_dev.indir_size) + return -EINVAL; + + rxfh_dev.indir = kcalloc(rxfh_dev.indir_size, + sizeof(rxfh_dev.indir[0]), GFP_USER); + if (!rxfh_dev.indir) + return -ENOMEM; + + num_rx_rings = ethtool_get_rx_ring_count(dev); + if (num_rx_rings < 0) { + ret = num_rx_rings; + goto out; + } + + if (user_size == 0) { + u32 *indir = rxfh_dev.indir; + + for (i = 0; i < rxfh_dev.indir_size; i++) + indir[i] = ethtool_rxfh_indir_default(i, num_rx_rings); + } else { + ret = ethtool_copy_validate_indir(rxfh_dev.indir, + useraddr + ringidx_offset, + num_rx_rings, + rxfh_dev.indir_size); + if (ret) + goto out; + } + + rxfh_dev.hfunc = ETH_RSS_HASH_NO_CHANGE; + + mutex_lock(&dev->ethtool->rss_lock); + ret = ops->set_rxfh(dev, &rxfh_dev, extack); + if (ret) + goto out_unlock; + + /* indicate whether rxfh was set to default */ + if (user_size == 0) + dev->priv_flags &= ~IFF_RXFH_CONFIGURED; + else + dev->priv_flags |= IFF_RXFH_CONFIGURED; + +out_unlock: + mutex_unlock(&dev->ethtool->rss_lock); +out: + kfree(rxfh_dev.indir); + return ret; +} + +static noinline_for_stack int ethtool_get_rxfh(struct net_device *dev, + void __user *useraddr) +{ + const struct ethtool_ops *ops = dev->ethtool_ops; + struct ethtool_rxfh_param rxfh_dev = {}; + u32 user_indir_size, user_key_size; + struct ethtool_rxfh_context *ctx; + struct ethtool_rxfh rxfh; + u32 indir_bytes; + u8 *rss_config; + u32 total_size; + int ret; + + if (!ops->get_rxfh) + return -EOPNOTSUPP; + + if (ops->get_rxfh_indir_size) + rxfh_dev.indir_size = ops->get_rxfh_indir_size(dev); + if (ops->get_rxfh_key_size) + rxfh_dev.key_size = ops->get_rxfh_key_size(dev); + + if (copy_from_user(&rxfh, useraddr, sizeof(rxfh))) + return -EFAULT; + user_indir_size = rxfh.indir_size; + user_key_size = rxfh.key_size; + + /* Check that reserved fields are 0 for now */ + if (rxfh.rsvd8[0] || rxfh.rsvd8[1] || rxfh.rsvd32) + return -EINVAL; + /* Most drivers don't handle rss_context, check it's 0 as well */ + if (rxfh.rss_context && !ops->create_rxfh_context) + return -EOPNOTSUPP; + + rxfh.indir_size = rxfh_dev.indir_size; + rxfh.key_size = rxfh_dev.key_size; + if (copy_to_user(useraddr, &rxfh, sizeof(rxfh))) + return -EFAULT; + + if ((user_indir_size && user_indir_size != rxfh_dev.indir_size) || + (user_key_size && user_key_size != rxfh_dev.key_size)) + return -EINVAL; + + indir_bytes = 
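+	/* The user-visible layout after struct ethtool_rxfh is the indirection
+	 * table (u32 entries) immediately followed by the hash key, so a
+	 * single rss_config allocation of indir_bytes + key_size is carved
+	 * into the two pointers below.
+	 */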
user_indir_size * sizeof(rxfh_dev.indir[0]); + total_size = indir_bytes + user_key_size; + rss_config = kzalloc(total_size, GFP_USER); + if (!rss_config) + return -ENOMEM; + + if (user_indir_size) + rxfh_dev.indir = (u32 *)rss_config; + + if (user_key_size) + rxfh_dev.key = rss_config + indir_bytes; + + mutex_lock(&dev->ethtool->rss_lock); + if (rxfh.rss_context) { + ctx = xa_load(&dev->ethtool->rss_ctx, rxfh.rss_context); + if (!ctx) { + ret = -ENOENT; + goto out; + } + if (rxfh_dev.indir) + memcpy(rxfh_dev.indir, ethtool_rxfh_context_indir(ctx), + indir_bytes); + if (!ops->rxfh_per_ctx_key) { + rxfh_dev.key_size = 0; + } else { + if (rxfh_dev.key) + memcpy(rxfh_dev.key, + ethtool_rxfh_context_key(ctx), + user_key_size); + rxfh_dev.hfunc = ctx->hfunc; + } + rxfh_dev.input_xfrm = ctx->input_xfrm; + ret = 0; + } else { + ret = dev->ethtool_ops->get_rxfh(dev, &rxfh_dev); + if (ret) + goto out; + } + + if (copy_to_user(useraddr + offsetof(struct ethtool_rxfh, hfunc), + &rxfh_dev.hfunc, sizeof(rxfh.hfunc))) { + ret = -EFAULT; + } else if (copy_to_user(useraddr + + offsetof(struct ethtool_rxfh, input_xfrm), + &rxfh_dev.input_xfrm, + sizeof(rxfh.input_xfrm))) { + ret = -EFAULT; + } else if (copy_to_user(useraddr + + offsetof(struct ethtool_rxfh, key_size), + &rxfh_dev.key_size, + sizeof(rxfh.key_size))) { + ret = -EFAULT; + } else if (copy_to_user(useraddr + + offsetof(struct ethtool_rxfh, rss_config[0]), + rss_config, total_size)) { + ret = -EFAULT; + } +out: + mutex_unlock(&dev->ethtool->rss_lock); + kfree(rss_config); + + return ret; +} + +static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev, + void __user *useraddr) +{ + u32 rss_cfg_offset = offsetof(struct ethtool_rxfh, rss_config[0]); + const struct ethtool_ops *ops = dev->ethtool_ops; + u32 dev_indir_size = 0, dev_key_size = 0, i; + u32 user_indir_len = 0, indir_bytes = 0; + struct ethtool_rxfh_param rxfh_dev = {}; + struct ethtool_rxfh_context *ctx = NULL; + struct netlink_ext_ack *extack = NULL; + struct ethtool_rxfh rxfh; + bool create = false; + int num_rx_rings; + u8 *rss_config; + int ntf = 0; + int ret; + + if (!ops->set_rxfh) + return -EOPNOTSUPP; + + if (ops->get_rxfh_indir_size) + dev_indir_size = ops->get_rxfh_indir_size(dev); + if (ops->get_rxfh_key_size) + dev_key_size = ops->get_rxfh_key_size(dev); + + if (copy_from_user(&rxfh, useraddr, sizeof(rxfh))) + return -EFAULT; + + /* Check that reserved fields are 0 for now */ + if (rxfh.rsvd8[0] || rxfh.rsvd8[1] || rxfh.rsvd32) + return -EINVAL; + /* Most drivers don't handle rss_context, check it's 0 as well */ + if (rxfh.rss_context && !ops->create_rxfh_context) + return -EOPNOTSUPP; + /* Check input data transformation capabilities */ + if (rxfh.input_xfrm && rxfh.input_xfrm != RXH_XFRM_SYM_XOR && + rxfh.input_xfrm != RXH_XFRM_SYM_OR_XOR && + rxfh.input_xfrm != RXH_XFRM_NO_CHANGE) + return -EINVAL; + if (rxfh.input_xfrm != RXH_XFRM_NO_CHANGE && + rxfh.input_xfrm & ~ops->supported_input_xfrm) + return -EOPNOTSUPP; + create = rxfh.rss_context == ETH_RXFH_CONTEXT_ALLOC; + + if ((rxfh.indir_size && + rxfh.indir_size != ETH_RXFH_INDIR_NO_CHANGE && + rxfh.indir_size != dev_indir_size) || + (rxfh.key_size && rxfh.key_size != dev_key_size)) + return -EINVAL; + + /* Must request at least one change: indir size, hash key, function + * or input transformation. + * There's no need for any of it in case of context creation. 
+ */ + if (!create && + (rxfh.indir_size == ETH_RXFH_INDIR_NO_CHANGE && + rxfh.key_size == 0 && rxfh.hfunc == ETH_RSS_HASH_NO_CHANGE && + rxfh.input_xfrm == RXH_XFRM_NO_CHANGE)) + return -EINVAL; + + indir_bytes = dev_indir_size * sizeof(rxfh_dev.indir[0]); + + /* Check settings which may be global rather than per RSS-context */ + if (rxfh.rss_context && !ops->rxfh_per_ctx_key) + if (rxfh.key_size || + (rxfh.hfunc && rxfh.hfunc != ETH_RSS_HASH_NO_CHANGE) || + (rxfh.input_xfrm && rxfh.input_xfrm != RXH_XFRM_NO_CHANGE)) + return -EOPNOTSUPP; + + rss_config = kzalloc(indir_bytes + dev_key_size, GFP_USER); + if (!rss_config) + return -ENOMEM; + + num_rx_rings = ethtool_get_rx_ring_count(dev); + if (num_rx_rings < 0) { + ret = num_rx_rings; + goto out_free; + } + + /* rxfh.indir_size == 0 means reset the indir table to default (master + * context) or delete the context (other RSS contexts). + * rxfh.indir_size == ETH_RXFH_INDIR_NO_CHANGE means leave it unchanged. + */ + if (rxfh.indir_size && + rxfh.indir_size != ETH_RXFH_INDIR_NO_CHANGE) { + user_indir_len = indir_bytes; + rxfh_dev.indir = (u32 *)rss_config; + rxfh_dev.indir_size = dev_indir_size; + ret = ethtool_copy_validate_indir(rxfh_dev.indir, + useraddr + rss_cfg_offset, + num_rx_rings, + rxfh.indir_size); + if (ret) + goto out_free; + } else if (rxfh.indir_size == 0) { + if (rxfh.rss_context == 0) { + u32 *indir; + + rxfh_dev.indir = (u32 *)rss_config; + rxfh_dev.indir_size = dev_indir_size; + indir = rxfh_dev.indir; + for (i = 0; i < dev_indir_size; i++) + indir[i] = + ethtool_rxfh_indir_default(i, num_rx_rings); + } else { + rxfh_dev.rss_delete = true; + } + } + + if (rxfh.key_size) { + rxfh_dev.key_size = dev_key_size; + rxfh_dev.key = rss_config + indir_bytes; + if (copy_from_user(rxfh_dev.key, + useraddr + rss_cfg_offset + user_indir_len, + rxfh.key_size)) { + ret = -EFAULT; + goto out_free; + } + } + + mutex_lock(&dev->ethtool->rss_lock); + + ret = ethtool_check_flow_types(dev, rxfh.input_xfrm); + if (ret) + goto out_unlock; + + if (rxfh.rss_context && rxfh_dev.rss_delete) { + ret = ethtool_check_rss_ctx_busy(dev, rxfh.rss_context); + if (ret) + goto out_unlock; + } + + if (create) { + u32 limit, ctx_id; + + if (rxfh_dev.rss_delete) { + ret = -EINVAL; + goto out_unlock; + } + ctx = ethtool_rxfh_ctx_alloc(ops, dev_indir_size, dev_key_size); + if (!ctx) { + ret = -ENOMEM; + goto out_unlock; + } + + limit = ops->rxfh_max_num_contexts ?: U32_MAX; + ret = xa_alloc(&dev->ethtool->rss_ctx, &ctx_id, ctx, + XA_LIMIT(1, limit - 1), GFP_KERNEL_ACCOUNT); + if (ret < 0) { + kfree(ctx); + goto out_unlock; + } + WARN_ON(!ctx_id); /* can't happen */ + rxfh.rss_context = ctx_id; + } else if (rxfh.rss_context) { + ctx = xa_load(&dev->ethtool->rss_ctx, rxfh.rss_context); + if (!ctx) { + ret = -ENOENT; + goto out_unlock; + } + } + rxfh_dev.hfunc = rxfh.hfunc; + rxfh_dev.rss_context = rxfh.rss_context; + rxfh_dev.input_xfrm = rxfh.input_xfrm; + + if (!rxfh.rss_context) { + ntf = ETHTOOL_MSG_RSS_NTF; + ret = ops->set_rxfh(dev, &rxfh_dev, extack); + } else if (create) { + ntf = ETHTOOL_MSG_RSS_CREATE_NTF; + ret = ops->create_rxfh_context(dev, ctx, &rxfh_dev, extack); + /* Make sure driver populates defaults */ + WARN_ON_ONCE(!ret && !rxfh_dev.key && ops->rxfh_per_ctx_key && + !memchr_inv(ethtool_rxfh_context_key(ctx), 0, + ctx->key_size)); + } else if (rxfh_dev.rss_delete) { + ntf = ETHTOOL_MSG_RSS_DELETE_NTF; + ret = ops->remove_rxfh_context(dev, ctx, rxfh.rss_context, + extack); + } else { + ntf = ETHTOOL_MSG_RSS_NTF; + ret = 
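+	/* Modify an already existing additional context; together with the
+	 * branches above this dispatches set (context 0), create, remove and
+	 * modify, while ntf records which RSS notification to send once the
+	 * operation has succeeded.
+	 */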
ops->modify_rxfh_context(dev, ctx, &rxfh_dev, extack); + } + if (ret) { + ntf = 0; + if (create) { + /* failed to create, free our new tracking entry */ + xa_erase(&dev->ethtool->rss_ctx, rxfh.rss_context); + kfree(ctx); + } + goto out_unlock; + } + + if (copy_to_user(useraddr + offsetof(struct ethtool_rxfh, rss_context), + &rxfh_dev.rss_context, sizeof(rxfh_dev.rss_context))) + ret = -EFAULT; + + if (!rxfh_dev.rss_context) { + /* indicate whether rxfh was set to default */ + if (rxfh.indir_size == 0) + dev->priv_flags &= ~IFF_RXFH_CONFIGURED; + else if (rxfh.indir_size != ETH_RXFH_INDIR_NO_CHANGE) + dev->priv_flags |= IFF_RXFH_CONFIGURED; + } + /* Update rss_ctx tracking */ + if (rxfh_dev.rss_delete) { + WARN_ON(xa_erase(&dev->ethtool->rss_ctx, rxfh.rss_context) != ctx); + kfree(ctx); + } else if (ctx) { + if (rxfh_dev.indir) { + for (i = 0; i < dev_indir_size; i++) + ethtool_rxfh_context_indir(ctx)[i] = rxfh_dev.indir[i]; + ctx->indir_configured = + rxfh.indir_size && + rxfh.indir_size != ETH_RXFH_INDIR_NO_CHANGE; + } + if (rxfh_dev.key) { + memcpy(ethtool_rxfh_context_key(ctx), rxfh_dev.key, + dev_key_size); + ctx->key_configured = !!rxfh.key_size; + } + if (rxfh_dev.hfunc != ETH_RSS_HASH_NO_CHANGE) + ctx->hfunc = rxfh_dev.hfunc; + if (rxfh_dev.input_xfrm != RXH_XFRM_NO_CHANGE) + ctx->input_xfrm = rxfh_dev.input_xfrm; + } + +out_unlock: + mutex_unlock(&dev->ethtool->rss_lock); +out_free: + kfree(rss_config); + if (ntf) + ethtool_rss_notify(dev, ntf, rxfh.rss_context); + return ret; +} + +static int ethtool_get_regs(struct net_device *dev, char __user *useraddr) +{ + struct ethtool_regs regs; + const struct ethtool_ops *ops = dev->ethtool_ops; + void *regbuf; + int reglen, ret; + + if (!ops->get_regs || !ops->get_regs_len) + return -EOPNOTSUPP; + + if (copy_from_user(®s, useraddr, sizeof(regs))) + return -EFAULT; + + reglen = ops->get_regs_len(dev); + if (reglen <= 0) + return reglen; + + if (regs.len > reglen) + regs.len = reglen; + + regbuf = vzalloc(reglen); + if (!regbuf) + return -ENOMEM; + + if (regs.len < reglen) + reglen = regs.len; + + ops->get_regs(dev, ®s, regbuf); + + ret = -EFAULT; + if (copy_to_user(useraddr, ®s, sizeof(regs))) + goto out; + useraddr += offsetof(struct ethtool_regs, data); + if (copy_to_user(useraddr, regbuf, reglen)) + goto out; + ret = 0; + + out: + vfree(regbuf); + return ret; +} + +static int ethtool_reset(struct net_device *dev, char __user *useraddr) +{ + struct ethtool_value reset; + int ret; + + if (!dev->ethtool_ops->reset) + return -EOPNOTSUPP; + + if (dev->ethtool->module_fw_flash_in_progress) + return -EBUSY; + + if (copy_from_user(&reset, useraddr, sizeof(reset))) + return -EFAULT; + + ret = dev->ethtool_ops->reset(dev, &reset.data); + if (ret) + return ret; + + if (copy_to_user(useraddr, &reset, sizeof(reset))) + return -EFAULT; + return 0; +} + +static int ethtool_get_wol(struct net_device *dev, char __user *useraddr) +{ + struct ethtool_wolinfo wol; + + if (!dev->ethtool_ops->get_wol) + return -EOPNOTSUPP; + + memset(&wol, 0, sizeof(struct ethtool_wolinfo)); + wol.cmd = ETHTOOL_GWOL; + dev->ethtool_ops->get_wol(dev, &wol); + + if (copy_to_user(useraddr, &wol, sizeof(wol))) + return -EFAULT; + return 0; +} + +static int ethtool_set_wol(struct net_device *dev, char __user *useraddr) +{ + struct ethtool_wolinfo wol, cur_wol; + int ret; + + if (!dev->ethtool_ops->get_wol || !dev->ethtool_ops->set_wol) + return -EOPNOTSUPP; + + memset(&cur_wol, 0, sizeof(struct ethtool_wolinfo)); + cur_wol.cmd = ETHTOOL_GWOL; + dev->ethtool_ops->get_wol(dev, 
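+	/* Fetch the current settings first so that requests for unsupported
+	 * wolopts bits can be rejected and exact no-op requests can return
+	 * early without calling ->set_wol().
+	 */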
&cur_wol); + + if (copy_from_user(&wol, useraddr, sizeof(wol))) + return -EFAULT; + + if (wol.wolopts & ~cur_wol.supported) + return -EINVAL; + + if (wol.wolopts == cur_wol.wolopts && + !memcmp(wol.sopass, cur_wol.sopass, sizeof(wol.sopass))) + return 0; + + ret = dev->ethtool_ops->set_wol(dev, &wol); + if (ret) + return ret; + + dev->ethtool->wol_enabled = !!wol.wolopts; + ethtool_notify(dev, ETHTOOL_MSG_WOL_NTF); + + return 0; +} + +static void eee_to_keee(struct ethtool_keee *keee, + const struct ethtool_eee *eee) +{ + memset(keee, 0, sizeof(*keee)); + + keee->eee_enabled = eee->eee_enabled; + keee->tx_lpi_enabled = eee->tx_lpi_enabled; + keee->tx_lpi_timer = eee->tx_lpi_timer; + + ethtool_convert_legacy_u32_to_link_mode(keee->advertised, + eee->advertised); +} + +static void keee_to_eee(struct ethtool_eee *eee, + const struct ethtool_keee *keee) +{ + bool overflow; + + memset(eee, 0, sizeof(*eee)); + + eee->eee_active = keee->eee_active; + eee->eee_enabled = keee->eee_enabled; + eee->tx_lpi_enabled = keee->tx_lpi_enabled; + eee->tx_lpi_timer = keee->tx_lpi_timer; + + overflow = !ethtool_convert_link_mode_to_legacy_u32(&eee->supported, + keee->supported); + ethtool_convert_link_mode_to_legacy_u32(&eee->advertised, + keee->advertised); + ethtool_convert_link_mode_to_legacy_u32(&eee->lp_advertised, + keee->lp_advertised); + if (overflow) + pr_warn("Ethtool ioctl interface doesn't support passing EEE linkmodes beyond bit 32\n"); +} + +static int ethtool_get_eee(struct net_device *dev, char __user *useraddr) +{ + struct ethtool_keee keee; + struct ethtool_eee eee; + int rc; + + if (!dev->ethtool_ops->get_eee) + return -EOPNOTSUPP; + + memset(&keee, 0, sizeof(keee)); + rc = dev->ethtool_ops->get_eee(dev, &keee); + if (rc) + return rc; + + keee_to_eee(&eee, &keee); + if (copy_to_user(useraddr, &eee, sizeof(eee))) + return -EFAULT; + + return 0; +} + +static int ethtool_set_eee(struct net_device *dev, char __user *useraddr) +{ + struct ethtool_keee keee; + struct ethtool_eee eee; + int ret; + + if (!dev->ethtool_ops->set_eee) + return -EOPNOTSUPP; + + if (copy_from_user(&eee, useraddr, sizeof(eee))) + return -EFAULT; + + eee_to_keee(&keee, &eee); + ret = dev->ethtool_ops->set_eee(dev, &keee); + if (!ret) + ethtool_notify(dev, ETHTOOL_MSG_EEE_NTF); + return ret; +} + +static int ethtool_nway_reset(struct net_device *dev) +{ + if (!dev->ethtool_ops->nway_reset) + return -EOPNOTSUPP; + + return dev->ethtool_ops->nway_reset(dev); +} + +static int ethtool_get_link(struct net_device *dev, char __user *useraddr) +{ + struct ethtool_value edata = { .cmd = ETHTOOL_GLINK }; + int link = __ethtool_get_link(dev); + + if (link < 0) + return link; + + edata.data = link; + if (copy_to_user(useraddr, &edata, sizeof(edata))) + return -EFAULT; + return 0; +} + +static int ethtool_get_any_eeprom(struct net_device *dev, void __user *useraddr, + int (*getter)(struct net_device *, + struct ethtool_eeprom *, u8 *), + u32 total_len) +{ + struct ethtool_eeprom eeprom; + void __user *userbuf = useraddr + sizeof(eeprom); + u32 bytes_remaining; + u8 *data; + int ret = 0; + + if (copy_from_user(&eeprom, useraddr, sizeof(eeprom))) + return -EFAULT; + + /* Check for wrap and zero */ + if (eeprom.offset + eeprom.len <= eeprom.offset) + return -EINVAL; + + /* Check for exceeding total eeprom len */ + if (eeprom.offset + eeprom.len > total_len) + return -EINVAL; + + data = kzalloc(PAGE_SIZE, GFP_USER); + if (!data) + return -ENOMEM; + + bytes_remaining = eeprom.len; + while (bytes_remaining > 0) { + eeprom.len = 
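+		/* The read is chunked through a single PAGE_SIZE bounce
+		 * buffer; offset/len advance per chunk, a zero-length return
+		 * from the driver is treated as -EIO, and the eeprom.len
+		 * written back at the end reflects how much was actually
+		 * transferred (with the offset rewound to its original value).
+		 */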
min(bytes_remaining, (u32)PAGE_SIZE); + + ret = getter(dev, &eeprom, data); + if (ret) + break; + if (!eeprom.len) { + ret = -EIO; + break; + } + if (copy_to_user(userbuf, data, eeprom.len)) { + ret = -EFAULT; + break; + } + userbuf += eeprom.len; + eeprom.offset += eeprom.len; + bytes_remaining -= eeprom.len; + } + + eeprom.len = userbuf - (useraddr + sizeof(eeprom)); + eeprom.offset -= eeprom.len; + if (copy_to_user(useraddr, &eeprom, sizeof(eeprom))) + ret = -EFAULT; + + kfree(data); + return ret; +} + +static int ethtool_get_eeprom(struct net_device *dev, void __user *useraddr) +{ + const struct ethtool_ops *ops = dev->ethtool_ops; + + if (!ops->get_eeprom || !ops->get_eeprom_len || + !ops->get_eeprom_len(dev)) + return -EOPNOTSUPP; + + return ethtool_get_any_eeprom(dev, useraddr, ops->get_eeprom, + ops->get_eeprom_len(dev)); +} + +static int ethtool_set_eeprom(struct net_device *dev, void __user *useraddr) +{ + struct ethtool_eeprom eeprom; + const struct ethtool_ops *ops = dev->ethtool_ops; + void __user *userbuf = useraddr + sizeof(eeprom); + u32 bytes_remaining; + u8 *data; + int ret = 0; + + if (!ops->set_eeprom || !ops->get_eeprom_len || + !ops->get_eeprom_len(dev)) + return -EOPNOTSUPP; + + if (copy_from_user(&eeprom, useraddr, sizeof(eeprom))) + return -EFAULT; + + /* Check for wrap and zero */ + if (eeprom.offset + eeprom.len <= eeprom.offset) + return -EINVAL; + + /* Check for exceeding total eeprom len */ + if (eeprom.offset + eeprom.len > ops->get_eeprom_len(dev)) + return -EINVAL; + + data = kzalloc(PAGE_SIZE, GFP_USER); + if (!data) + return -ENOMEM; + + bytes_remaining = eeprom.len; + while (bytes_remaining > 0) { + eeprom.len = min(bytes_remaining, (u32)PAGE_SIZE); + + if (copy_from_user(data, userbuf, eeprom.len)) { + ret = -EFAULT; + break; + } + ret = ops->set_eeprom(dev, &eeprom, data); + if (ret) + break; + userbuf += eeprom.len; + eeprom.offset += eeprom.len; + bytes_remaining -= eeprom.len; + } + + kfree(data); + return ret; +} + +static noinline_for_stack int ethtool_get_coalesce(struct net_device *dev, + void __user *useraddr) +{ + struct ethtool_coalesce coalesce = { .cmd = ETHTOOL_GCOALESCE }; + struct kernel_ethtool_coalesce kernel_coalesce = {}; + int ret; + + if (!dev->ethtool_ops->get_coalesce) + return -EOPNOTSUPP; + + ret = dev->ethtool_ops->get_coalesce(dev, &coalesce, &kernel_coalesce, + NULL); + if (ret) + return ret; + + if (copy_to_user(useraddr, &coalesce, sizeof(coalesce))) + return -EFAULT; + return 0; +} + +static bool +ethtool_set_coalesce_supported(struct net_device *dev, + struct ethtool_coalesce *coalesce) +{ + u32 supported_params = dev->ethtool_ops->supported_coalesce_params; + u32 nonzero_params = 0; + + if (coalesce->rx_coalesce_usecs) + nonzero_params |= ETHTOOL_COALESCE_RX_USECS; + if (coalesce->rx_max_coalesced_frames) + nonzero_params |= ETHTOOL_COALESCE_RX_MAX_FRAMES; + if (coalesce->rx_coalesce_usecs_irq) + nonzero_params |= ETHTOOL_COALESCE_RX_USECS_IRQ; + if (coalesce->rx_max_coalesced_frames_irq) + nonzero_params |= ETHTOOL_COALESCE_RX_MAX_FRAMES_IRQ; + if (coalesce->tx_coalesce_usecs) + nonzero_params |= ETHTOOL_COALESCE_TX_USECS; + if (coalesce->tx_max_coalesced_frames) + nonzero_params |= ETHTOOL_COALESCE_TX_MAX_FRAMES; + if (coalesce->tx_coalesce_usecs_irq) + nonzero_params |= ETHTOOL_COALESCE_TX_USECS_IRQ; + if (coalesce->tx_max_coalesced_frames_irq) + nonzero_params |= ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ; + if (coalesce->stats_block_coalesce_usecs) + nonzero_params |= ETHTOOL_COALESCE_STATS_BLOCK_USECS; + if 
(coalesce->use_adaptive_rx_coalesce) + nonzero_params |= ETHTOOL_COALESCE_USE_ADAPTIVE_RX; + if (coalesce->use_adaptive_tx_coalesce) + nonzero_params |= ETHTOOL_COALESCE_USE_ADAPTIVE_TX; + if (coalesce->pkt_rate_low) + nonzero_params |= ETHTOOL_COALESCE_PKT_RATE_LOW; + if (coalesce->rx_coalesce_usecs_low) + nonzero_params |= ETHTOOL_COALESCE_RX_USECS_LOW; + if (coalesce->rx_max_coalesced_frames_low) + nonzero_params |= ETHTOOL_COALESCE_RX_MAX_FRAMES_LOW; + if (coalesce->tx_coalesce_usecs_low) + nonzero_params |= ETHTOOL_COALESCE_TX_USECS_LOW; + if (coalesce->tx_max_coalesced_frames_low) + nonzero_params |= ETHTOOL_COALESCE_TX_MAX_FRAMES_LOW; + if (coalesce->pkt_rate_high) + nonzero_params |= ETHTOOL_COALESCE_PKT_RATE_HIGH; + if (coalesce->rx_coalesce_usecs_high) + nonzero_params |= ETHTOOL_COALESCE_RX_USECS_HIGH; + if (coalesce->rx_max_coalesced_frames_high) + nonzero_params |= ETHTOOL_COALESCE_RX_MAX_FRAMES_HIGH; + if (coalesce->tx_coalesce_usecs_high) + nonzero_params |= ETHTOOL_COALESCE_TX_USECS_HIGH; + if (coalesce->tx_max_coalesced_frames_high) + nonzero_params |= ETHTOOL_COALESCE_TX_MAX_FRAMES_HIGH; + if (coalesce->rate_sample_interval) + nonzero_params |= ETHTOOL_COALESCE_RATE_SAMPLE_INTERVAL; + + return (supported_params & nonzero_params) == nonzero_params; +} + +static noinline_for_stack int ethtool_set_coalesce(struct net_device *dev, + void __user *useraddr) +{ + struct kernel_ethtool_coalesce kernel_coalesce = {}; + struct ethtool_coalesce coalesce; + int ret; + + if (!dev->ethtool_ops->set_coalesce || !dev->ethtool_ops->get_coalesce) + return -EOPNOTSUPP; + + ret = dev->ethtool_ops->get_coalesce(dev, &coalesce, &kernel_coalesce, + NULL); + if (ret) + return ret; + + if (copy_from_user(&coalesce, useraddr, sizeof(coalesce))) + return -EFAULT; + + if (!ethtool_set_coalesce_supported(dev, &coalesce)) + return -EOPNOTSUPP; + + ret = dev->ethtool_ops->set_coalesce(dev, &coalesce, &kernel_coalesce, + NULL); + if (!ret) + ethtool_notify(dev, ETHTOOL_MSG_COALESCE_NTF); + return ret; +} + +static int ethtool_get_ringparam(struct net_device *dev, void __user *useraddr) +{ + struct ethtool_ringparam ringparam = { .cmd = ETHTOOL_GRINGPARAM }; + struct kernel_ethtool_ringparam kernel_ringparam = {}; + + if (!dev->ethtool_ops->get_ringparam) + return -EOPNOTSUPP; + + dev->ethtool_ops->get_ringparam(dev, &ringparam, + &kernel_ringparam, NULL); + + if (copy_to_user(useraddr, &ringparam, sizeof(ringparam))) + return -EFAULT; + return 0; +} + +static int ethtool_set_ringparam(struct net_device *dev, void __user *useraddr) +{ + struct kernel_ethtool_ringparam kernel_ringparam; + struct ethtool_ringparam ringparam, max; + int ret; + + if (!dev->ethtool_ops->set_ringparam || !dev->ethtool_ops->get_ringparam) + return -EOPNOTSUPP; + + if (copy_from_user(&ringparam, useraddr, sizeof(ringparam))) + return -EFAULT; + + ethtool_ringparam_get_cfg(dev, &max, &kernel_ringparam, NULL); + + /* ensure new ring parameters are within the maximums */ + if (ringparam.rx_pending > max.rx_max_pending || + ringparam.rx_mini_pending > max.rx_mini_max_pending || + ringparam.rx_jumbo_pending > max.rx_jumbo_max_pending || + ringparam.tx_pending > max.tx_max_pending) + return -EINVAL; + + ret = dev->ethtool_ops->set_ringparam(dev, &ringparam, + &kernel_ringparam, NULL); + if (!ret) + ethtool_notify(dev, ETHTOOL_MSG_RINGS_NTF); + return ret; +} + +static noinline_for_stack int ethtool_get_channels(struct net_device *dev, + void __user *useraddr) +{ + struct ethtool_channels channels = { .cmd = ETHTOOL_GCHANNELS }; + + if 
(!dev->ethtool_ops->get_channels) + return -EOPNOTSUPP; + + dev->ethtool_ops->get_channels(dev, &channels); + + if (copy_to_user(useraddr, &channels, sizeof(channels))) + return -EFAULT; + return 0; +} + +static noinline_for_stack int ethtool_set_channels(struct net_device *dev, + void __user *useraddr) +{ + struct ethtool_channels channels, curr = { .cmd = ETHTOOL_GCHANNELS }; + u16 from_channel, to_channel; + unsigned int i; + int ret; + + if (!dev->ethtool_ops->set_channels || !dev->ethtool_ops->get_channels) + return -EOPNOTSUPP; + + if (copy_from_user(&channels, useraddr, sizeof(channels))) + return -EFAULT; + + dev->ethtool_ops->get_channels(dev, &curr); + + if (channels.rx_count == curr.rx_count && + channels.tx_count == curr.tx_count && + channels.combined_count == curr.combined_count && + channels.other_count == curr.other_count) + return 0; + + /* ensure new counts are within the maximums */ + if (channels.rx_count > curr.max_rx || + channels.tx_count > curr.max_tx || + channels.combined_count > curr.max_combined || + channels.other_count > curr.max_other) + return -EINVAL; + + /* ensure there is at least one RX and one TX channel */ + if (!channels.combined_count && + (!channels.rx_count || !channels.tx_count)) + return -EINVAL; + + ret = ethtool_check_max_channel(dev, channels, NULL); + if (ret) + return ret; + + /* Disabling channels, query zero-copy AF_XDP sockets */ + from_channel = channels.combined_count + + min(channels.rx_count, channels.tx_count); + to_channel = curr.combined_count + max(curr.rx_count, curr.tx_count); + for (i = from_channel; i < to_channel; i++) + if (xsk_get_pool_from_qid(dev, i)) + return -EINVAL; + + ret = dev->ethtool_ops->set_channels(dev, &channels); + if (!ret) + ethtool_notify(dev, ETHTOOL_MSG_CHANNELS_NTF); + return ret; +} + +static int ethtool_get_pauseparam(struct net_device *dev, void __user *useraddr) +{ + struct ethtool_pauseparam pauseparam = { .cmd = ETHTOOL_GPAUSEPARAM }; + + if (!dev->ethtool_ops->get_pauseparam) + return -EOPNOTSUPP; + + dev->ethtool_ops->get_pauseparam(dev, &pauseparam); + + if (copy_to_user(useraddr, &pauseparam, sizeof(pauseparam))) + return -EFAULT; + return 0; +} + +static int ethtool_set_pauseparam(struct net_device *dev, void __user *useraddr) +{ + struct ethtool_pauseparam pauseparam; + int ret; + + if (!dev->ethtool_ops->set_pauseparam) + return -EOPNOTSUPP; + + if (copy_from_user(&pauseparam, useraddr, sizeof(pauseparam))) + return -EFAULT; + + ret = dev->ethtool_ops->set_pauseparam(dev, &pauseparam); + if (!ret) + ethtool_notify(dev, ETHTOOL_MSG_PAUSE_NTF); + return ret; +} + +static int ethtool_self_test(struct net_device *dev, char __user *useraddr) +{ + struct ethtool_test test; + const struct ethtool_ops *ops = dev->ethtool_ops; + u64 *data; + int ret, test_len; + + if (!ops->self_test || !ops->get_sset_count) + return -EOPNOTSUPP; + + test_len = ops->get_sset_count(dev, ETH_SS_TEST); + if (test_len < 0) + return test_len; + WARN_ON(test_len == 0); + + if (copy_from_user(&test, useraddr, sizeof(test))) + return -EFAULT; + + test.len = test_len; + data = kcalloc(test_len, sizeof(u64), GFP_USER); + if (!data) + return -ENOMEM; + + netif_testing_on(dev); + ops->self_test(dev, &test, data); + netif_testing_off(dev); + + ret = -EFAULT; + if (copy_to_user(useraddr, &test, sizeof(test))) + goto out; + useraddr += sizeof(test); + if (copy_to_user(useraddr, data, array_size(test.len, sizeof(u64)))) + goto out; + ret = 0; + + out: + kfree(data); + return ret; +} + +static int ethtool_get_strings(struct 
net_device *dev, void __user *useraddr) +{ + struct ethtool_gstrings gstrings; + u8 *data; + int ret; + + if (copy_from_user(&gstrings, useraddr, sizeof(gstrings))) + return -EFAULT; + + ret = __ethtool_get_sset_count(dev, gstrings.string_set); + if (ret < 0) + return ret; + if (ret > S32_MAX / ETH_GSTRING_LEN) + return -ENOMEM; + WARN_ON_ONCE(!ret); + + gstrings.len = ret; + + if (gstrings.len) { + data = vzalloc(array_size(gstrings.len, ETH_GSTRING_LEN)); + if (!data) + return -ENOMEM; + + __ethtool_get_strings(dev, gstrings.string_set, data); + } else { + data = NULL; + } + + ret = -EFAULT; + if (copy_to_user(useraddr, &gstrings, sizeof(gstrings))) + goto out; + useraddr += sizeof(gstrings); + if (gstrings.len && + copy_to_user(useraddr, data, + array_size(gstrings.len, ETH_GSTRING_LEN))) + goto out; + ret = 0; + +out: + vfree(data); + return ret; +} + +__printf(2, 3) void ethtool_sprintf(u8 **data, const char *fmt, ...) +{ + va_list args; + + va_start(args, fmt); + vsnprintf(*data, ETH_GSTRING_LEN, fmt, args); + va_end(args); + + *data += ETH_GSTRING_LEN; +} +EXPORT_SYMBOL(ethtool_sprintf); + +void ethtool_puts(u8 **data, const char *str) +{ + strscpy(*data, str, ETH_GSTRING_LEN); + *data += ETH_GSTRING_LEN; +} +EXPORT_SYMBOL(ethtool_puts); + +static int ethtool_phys_id(struct net_device *dev, void __user *useraddr) +{ + struct ethtool_value id; + static bool busy; + const struct ethtool_ops *ops = dev->ethtool_ops; + netdevice_tracker dev_tracker; + int rc; + + if (!ops->set_phys_id) + return -EOPNOTSUPP; + + if (busy) + return -EBUSY; + + if (copy_from_user(&id, useraddr, sizeof(id))) + return -EFAULT; + + rc = ops->set_phys_id(dev, ETHTOOL_ID_ACTIVE); + if (rc < 0) + return rc; + + /* Drop the RTNL lock while waiting, but prevent reentry or + * removal of the device. + */ + busy = true; + netdev_hold(dev, &dev_tracker, GFP_KERNEL); + netdev_unlock_ops(dev); + rtnl_unlock(); + + if (rc == 0) { + /* Driver will handle this itself */ + schedule_timeout_interruptible( + id.data ? (id.data * HZ) : MAX_SCHEDULE_TIMEOUT); + } else { + /* Driver expects to be called at twice the frequency in rc */ + int n = rc * 2, interval = HZ / n; + u64 count = mul_u32_u32(n, id.data); + u64 i = 0; + + do { + rtnl_lock(); + netdev_lock_ops(dev); + rc = ops->set_phys_id(dev, + (i++ & 1) ? 
ETHTOOL_ID_OFF : ETHTOOL_ID_ON); + netdev_unlock_ops(dev); + rtnl_unlock(); + if (rc) + break; + schedule_timeout_interruptible(interval); + } while (!signal_pending(current) && (!id.data || i < count)); + } + + rtnl_lock(); + netdev_lock_ops(dev); + netdev_put(dev, &dev_tracker); + busy = false; + + (void) ops->set_phys_id(dev, ETHTOOL_ID_INACTIVE); + return rc; +} + +static int ethtool_get_stats(struct net_device *dev, void __user *useraddr) +{ + struct ethtool_stats stats; + const struct ethtool_ops *ops = dev->ethtool_ops; + u64 *data; + int ret, n_stats; + + if (!ops->get_ethtool_stats || !ops->get_sset_count) + return -EOPNOTSUPP; + + n_stats = ops->get_sset_count(dev, ETH_SS_STATS); + if (n_stats < 0) + return n_stats; + if (n_stats > S32_MAX / sizeof(u64)) + return -ENOMEM; + WARN_ON_ONCE(!n_stats); + if (copy_from_user(&stats, useraddr, sizeof(stats))) + return -EFAULT; + + stats.n_stats = n_stats; + + if (n_stats) { + data = vzalloc(array_size(n_stats, sizeof(u64))); + if (!data) + return -ENOMEM; + ops->get_ethtool_stats(dev, &stats, data); + } else { + data = NULL; + } + + ret = -EFAULT; + if (copy_to_user(useraddr, &stats, sizeof(stats))) + goto out; + useraddr += sizeof(stats); + if (n_stats && copy_to_user(useraddr, data, array_size(n_stats, sizeof(u64)))) + goto out; + ret = 0; + + out: + vfree(data); + return ret; +} + +static int ethtool_vzalloc_stats_array(int n_stats, u64 **data) +{ + if (n_stats < 0) + return n_stats; + if (n_stats > S32_MAX / sizeof(u64)) + return -ENOMEM; + if (WARN_ON_ONCE(!n_stats)) + return -EOPNOTSUPP; + + *data = vzalloc(array_size(n_stats, sizeof(u64))); + if (!*data) + return -ENOMEM; + + return 0; +} + +static int ethtool_get_phy_stats_phydev(struct phy_device *phydev, + struct ethtool_stats *stats, + u64 **data) + { + const struct ethtool_phy_ops *phy_ops = ethtool_phy_ops; + int n_stats, ret; + + if (!phy_ops || !phy_ops->get_sset_count || !phy_ops->get_stats) + return -EOPNOTSUPP; + + n_stats = phy_ops->get_sset_count(phydev); + + ret = ethtool_vzalloc_stats_array(n_stats, data); + if (ret) + return ret; + + stats->n_stats = n_stats; + return phy_ops->get_stats(phydev, stats, *data); +} + +static int ethtool_get_phy_stats_ethtool(struct net_device *dev, + struct ethtool_stats *stats, + u64 **data) +{ + const struct ethtool_ops *ops = dev->ethtool_ops; + int n_stats, ret; + + if (!ops || !ops->get_sset_count || !ops->get_ethtool_phy_stats) + return -EOPNOTSUPP; + + n_stats = ops->get_sset_count(dev, ETH_SS_PHY_STATS); + + ret = ethtool_vzalloc_stats_array(n_stats, data); + if (ret) + return ret; + + stats->n_stats = n_stats; + ops->get_ethtool_phy_stats(dev, stats, *data); + + return 0; +} + +static int ethtool_get_phy_stats(struct net_device *dev, void __user *useraddr) +{ + struct phy_device *phydev = dev->phydev; + struct ethtool_stats stats; + u64 *data = NULL; + int ret = -EOPNOTSUPP; + + if (copy_from_user(&stats, useraddr, sizeof(stats))) + return -EFAULT; + + if (phydev) + ret = ethtool_get_phy_stats_phydev(phydev, &stats, &data); + + if (ret == -EOPNOTSUPP) + ret = ethtool_get_phy_stats_ethtool(dev, &stats, &data); + + if (ret) + goto out; + + if (copy_to_user(useraddr, &stats, sizeof(stats))) { + ret = -EFAULT; + goto out; + } + + useraddr += sizeof(stats); + if (copy_to_user(useraddr, data, array_size(stats.n_stats, sizeof(u64)))) + ret = -EFAULT; + + out: + vfree(data); + return ret; +} + +static int ethtool_get_perm_addr(struct net_device *dev, void __user *useraddr) +{ + struct ethtool_perm_addr epaddr; + + if 
(copy_from_user(&epaddr, useraddr, sizeof(epaddr))) + return -EFAULT; + + if (epaddr.size < dev->addr_len) + return -ETOOSMALL; + epaddr.size = dev->addr_len; + + if (copy_to_user(useraddr, &epaddr, sizeof(epaddr))) + return -EFAULT; + useraddr += sizeof(epaddr); + if (copy_to_user(useraddr, dev->perm_addr, epaddr.size)) + return -EFAULT; + return 0; +} + +static int ethtool_get_value(struct net_device *dev, char __user *useraddr, + u32 cmd, u32 (*actor)(struct net_device *)) +{ + struct ethtool_value edata = { .cmd = cmd }; + + if (!actor) + return -EOPNOTSUPP; + + edata.data = actor(dev); + + if (copy_to_user(useraddr, &edata, sizeof(edata))) + return -EFAULT; + return 0; +} + +static int ethtool_set_value_void(struct net_device *dev, char __user *useraddr, + void (*actor)(struct net_device *, u32)) +{ + struct ethtool_value edata; + + if (!actor) + return -EOPNOTSUPP; + + if (copy_from_user(&edata, useraddr, sizeof(edata))) + return -EFAULT; + + actor(dev, edata.data); + return 0; +} + +static int ethtool_set_value(struct net_device *dev, char __user *useraddr, + int (*actor)(struct net_device *, u32)) +{ + struct ethtool_value edata; + + if (!actor) + return -EOPNOTSUPP; + + if (copy_from_user(&edata, useraddr, sizeof(edata))) + return -EFAULT; + + return actor(dev, edata.data); +} + +static int +ethtool_flash_device(struct net_device *dev, struct ethtool_devlink_compat *req) +{ + if (!dev->ethtool_ops->flash_device) { + req->devlink = netdev_to_devlink_get(dev); + return 0; + } + + return dev->ethtool_ops->flash_device(dev, &req->efl); +} + +static int ethtool_set_dump(struct net_device *dev, + void __user *useraddr) +{ + struct ethtool_dump dump; + + if (!dev->ethtool_ops->set_dump) + return -EOPNOTSUPP; + + if (copy_from_user(&dump, useraddr, sizeof(dump))) + return -EFAULT; + + return dev->ethtool_ops->set_dump(dev, &dump); +} + +static int ethtool_get_dump_flag(struct net_device *dev, + void __user *useraddr) +{ + int ret; + struct ethtool_dump dump; + const struct ethtool_ops *ops = dev->ethtool_ops; + + if (!ops->get_dump_flag) + return -EOPNOTSUPP; + + if (copy_from_user(&dump, useraddr, sizeof(dump))) + return -EFAULT; + + ret = ops->get_dump_flag(dev, &dump); + if (ret) + return ret; + + if (copy_to_user(useraddr, &dump, sizeof(dump))) + return -EFAULT; + return 0; +} + +static int ethtool_get_dump_data(struct net_device *dev, + void __user *useraddr) +{ + int ret; + __u32 len; + struct ethtool_dump dump, tmp; + const struct ethtool_ops *ops = dev->ethtool_ops; + void *data = NULL; + + if (!ops->get_dump_data || !ops->get_dump_flag) + return -EOPNOTSUPP; + + if (copy_from_user(&dump, useraddr, sizeof(dump))) + return -EFAULT; + + memset(&tmp, 0, sizeof(tmp)); + tmp.cmd = ETHTOOL_GET_DUMP_FLAG; + ret = ops->get_dump_flag(dev, &tmp); + if (ret) + return ret; + + len = min(tmp.len, dump.len); + if (!len) + return -EFAULT; + + /* Don't ever let the driver think there's more space available + * than it requested with .get_dump_flag(). + */ + dump.len = len; + + /* Always allocate enough space to hold the whole thing so that the + * driver does not need to check the length and bother with partial + * dumping. + */ + data = vzalloc(tmp.len); + if (!data) + return -ENOMEM; + ret = ops->get_dump_data(dev, &dump, data); + if (ret) + goto out; + + /* There are two sane possibilities: + * 1. The driver's .get_dump_data() does not touch dump.len. + * 2. Or it may set dump.len to how much it really writes, which + * should be tmp.len (or len if it can do a partial dump). 
+ * In any case respond to userspace with the actual length of data + * it's receiving. + */ + WARN_ON(dump.len != len && dump.len != tmp.len); + dump.len = len; + + if (copy_to_user(useraddr, &dump, sizeof(dump))) { + ret = -EFAULT; + goto out; + } + useraddr += offsetof(struct ethtool_dump, data); + if (copy_to_user(useraddr, data, len)) + ret = -EFAULT; +out: + vfree(data); + return ret; +} + +static int ethtool_get_ts_info(struct net_device *dev, void __user *useraddr) +{ + struct kernel_ethtool_ts_info kernel_info; + struct ethtool_ts_info info = {}; + int err; + + err = __ethtool_get_ts_info(dev, &kernel_info); + if (err) + return err; + + info.cmd = kernel_info.cmd; + info.so_timestamping = kernel_info.so_timestamping; + info.phc_index = kernel_info.phc_index; + info.tx_types = kernel_info.tx_types; + info.rx_filters = kernel_info.rx_filters; + + if (copy_to_user(useraddr, &info, sizeof(info))) + return -EFAULT; + + return 0; +} + +int ethtool_get_module_info_call(struct net_device *dev, + struct ethtool_modinfo *modinfo) +{ + const struct ethtool_ops *ops = dev->ethtool_ops; + struct phy_device *phydev = dev->phydev; + + if (dev->ethtool->module_fw_flash_in_progress) + return -EBUSY; + + if (dev->sfp_bus) + return sfp_get_module_info(dev->sfp_bus, modinfo); + + if (phydev && phydev->drv && phydev->drv->module_info) + return phydev->drv->module_info(phydev, modinfo); + + if (ops->get_module_info) + return ops->get_module_info(dev, modinfo); + + return -EOPNOTSUPP; +} + +static int ethtool_get_module_info(struct net_device *dev, + void __user *useraddr) +{ + int ret; + struct ethtool_modinfo modinfo; + + if (copy_from_user(&modinfo, useraddr, sizeof(modinfo))) + return -EFAULT; + + ret = ethtool_get_module_info_call(dev, &modinfo); + if (ret) + return ret; + + if (copy_to_user(useraddr, &modinfo, sizeof(modinfo))) + return -EFAULT; + + return 0; +} + +int ethtool_get_module_eeprom_call(struct net_device *dev, + struct ethtool_eeprom *ee, u8 *data) +{ + const struct ethtool_ops *ops = dev->ethtool_ops; + struct phy_device *phydev = dev->phydev; + + if (dev->ethtool->module_fw_flash_in_progress) + return -EBUSY; + + if (dev->sfp_bus) + return sfp_get_module_eeprom(dev->sfp_bus, ee, data); + + if (phydev && phydev->drv && phydev->drv->module_eeprom) + return phydev->drv->module_eeprom(phydev, ee, data); + + if (ops->get_module_eeprom) + return ops->get_module_eeprom(dev, ee, data); + + return -EOPNOTSUPP; +} + +static int ethtool_get_module_eeprom(struct net_device *dev, + void __user *useraddr) +{ + int ret; + struct ethtool_modinfo modinfo; + + ret = ethtool_get_module_info_call(dev, &modinfo); + if (ret) + return ret; + + return ethtool_get_any_eeprom(dev, useraddr, + ethtool_get_module_eeprom_call, + modinfo.eeprom_len); +} + +static int ethtool_tunable_valid(const struct ethtool_tunable *tuna) +{ + switch (tuna->id) { + case ETHTOOL_RX_COPYBREAK: + case ETHTOOL_TX_COPYBREAK: + case ETHTOOL_TX_COPYBREAK_BUF_SIZE: + if (tuna->len != sizeof(u32) || + tuna->type_id != ETHTOOL_TUNABLE_U32) + return -EINVAL; + break; + case ETHTOOL_PFC_PREVENTION_TOUT: + if (tuna->len != sizeof(u16) || + tuna->type_id != ETHTOOL_TUNABLE_U16) + return -EINVAL; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int ethtool_get_tunable(struct net_device *dev, void __user *useraddr) +{ + int ret; + struct ethtool_tunable tuna; + const struct ethtool_ops *ops = dev->ethtool_ops; + void *data; + + if (!ops->get_tunable) + return -EOPNOTSUPP; + if (copy_from_user(&tuna, useraddr, 
sizeof(tuna))) + return -EFAULT; + ret = ethtool_tunable_valid(&tuna); + if (ret) + return ret; + data = kzalloc(tuna.len, GFP_USER); + if (!data) + return -ENOMEM; + ret = ops->get_tunable(dev, &tuna, data); + if (ret) + goto out; + useraddr += sizeof(tuna); + ret = -EFAULT; + if (copy_to_user(useraddr, data, tuna.len)) + goto out; + ret = 0; + +out: + kfree(data); + return ret; +} + +static int ethtool_set_tunable(struct net_device *dev, void __user *useraddr) +{ + int ret; + struct ethtool_tunable tuna; + const struct ethtool_ops *ops = dev->ethtool_ops; + void *data; + + if (!ops->set_tunable) + return -EOPNOTSUPP; + if (copy_from_user(&tuna, useraddr, sizeof(tuna))) + return -EFAULT; + ret = ethtool_tunable_valid(&tuna); + if (ret) + return ret; + useraddr += sizeof(tuna); + data = memdup_user(useraddr, tuna.len); + if (IS_ERR(data)) + return PTR_ERR(data); + ret = ops->set_tunable(dev, &tuna, data); + + kfree(data); + return ret; +} + +static noinline_for_stack int +ethtool_get_per_queue_coalesce(struct net_device *dev, + void __user *useraddr, + struct ethtool_per_queue_op *per_queue_opt) +{ + u32 bit; + int ret; + DECLARE_BITMAP(queue_mask, MAX_NUM_QUEUE); + + if (!dev->ethtool_ops->get_per_queue_coalesce) + return -EOPNOTSUPP; + + useraddr += sizeof(*per_queue_opt); + + bitmap_from_arr32(queue_mask, per_queue_opt->queue_mask, + MAX_NUM_QUEUE); + + for_each_set_bit(bit, queue_mask, MAX_NUM_QUEUE) { + struct ethtool_coalesce coalesce = { .cmd = ETHTOOL_GCOALESCE }; + + ret = dev->ethtool_ops->get_per_queue_coalesce(dev, bit, &coalesce); + if (ret != 0) + return ret; + if (copy_to_user(useraddr, &coalesce, sizeof(coalesce))) + return -EFAULT; + useraddr += sizeof(coalesce); + } + + return 0; +} + +static noinline_for_stack int +ethtool_set_per_queue_coalesce(struct net_device *dev, + void __user *useraddr, + struct ethtool_per_queue_op *per_queue_opt) +{ + u32 bit; + int i, ret = 0; + int n_queue; + struct ethtool_coalesce *backup = NULL, *tmp = NULL; + DECLARE_BITMAP(queue_mask, MAX_NUM_QUEUE); + + if ((!dev->ethtool_ops->set_per_queue_coalesce) || + (!dev->ethtool_ops->get_per_queue_coalesce)) + return -EOPNOTSUPP; + + useraddr += sizeof(*per_queue_opt); + + bitmap_from_arr32(queue_mask, per_queue_opt->queue_mask, MAX_NUM_QUEUE); + n_queue = bitmap_weight(queue_mask, MAX_NUM_QUEUE); + tmp = backup = kmalloc_array(n_queue, sizeof(*backup), GFP_KERNEL); + if (!backup) + return -ENOMEM; + + for_each_set_bit(bit, queue_mask, MAX_NUM_QUEUE) { + struct ethtool_coalesce coalesce; + + ret = dev->ethtool_ops->get_per_queue_coalesce(dev, bit, tmp); + if (ret != 0) + goto roll_back; + + tmp++; + + if (copy_from_user(&coalesce, useraddr, sizeof(coalesce))) { + ret = -EFAULT; + goto roll_back; + } + + if (!ethtool_set_coalesce_supported(dev, &coalesce)) { + ret = -EOPNOTSUPP; + goto roll_back; + } + + ret = dev->ethtool_ops->set_per_queue_coalesce(dev, bit, &coalesce); + if (ret != 0) + goto roll_back; + + useraddr += sizeof(coalesce); + } + +roll_back: + if (ret != 0) { + tmp = backup; + for_each_set_bit(i, queue_mask, bit) { + dev->ethtool_ops->set_per_queue_coalesce(dev, i, tmp); + tmp++; + } + } + kfree(backup); + + return ret; +} + +static int noinline_for_stack ethtool_set_per_queue(struct net_device *dev, + void __user *useraddr, u32 sub_cmd) +{ + struct ethtool_per_queue_op per_queue_opt; + + if (copy_from_user(&per_queue_opt, useraddr, sizeof(per_queue_opt))) + return -EFAULT; + + if (per_queue_opt.sub_command != sub_cmd) + return -EINVAL; + + switch (per_queue_opt.sub_command) { + 
case ETHTOOL_GCOALESCE: + return ethtool_get_per_queue_coalesce(dev, useraddr, &per_queue_opt); + case ETHTOOL_SCOALESCE: + return ethtool_set_per_queue_coalesce(dev, useraddr, &per_queue_opt); + default: + return -EOPNOTSUPP; + } +} + +static int ethtool_phy_tunable_valid(const struct ethtool_tunable *tuna) +{ + switch (tuna->id) { + case ETHTOOL_PHY_DOWNSHIFT: + case ETHTOOL_PHY_FAST_LINK_DOWN: + if (tuna->len != sizeof(u8) || + tuna->type_id != ETHTOOL_TUNABLE_U8) + return -EINVAL; + break; + case ETHTOOL_PHY_EDPD: + if (tuna->len != sizeof(u16) || + tuna->type_id != ETHTOOL_TUNABLE_U16) + return -EINVAL; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int get_phy_tunable(struct net_device *dev, void __user *useraddr) +{ + struct phy_device *phydev = dev->phydev; + struct ethtool_tunable tuna; + bool phy_drv_tunable; + void *data; + int ret; + + phy_drv_tunable = phydev && phydev->drv && phydev->drv->get_tunable; + if (!phy_drv_tunable && !dev->ethtool_ops->get_phy_tunable) + return -EOPNOTSUPP; + if (copy_from_user(&tuna, useraddr, sizeof(tuna))) + return -EFAULT; + ret = ethtool_phy_tunable_valid(&tuna); + if (ret) + return ret; + data = kzalloc(tuna.len, GFP_USER); + if (!data) + return -ENOMEM; + if (phy_drv_tunable) { + mutex_lock(&phydev->lock); + ret = phydev->drv->get_tunable(phydev, &tuna, data); + mutex_unlock(&phydev->lock); + } else { + ret = dev->ethtool_ops->get_phy_tunable(dev, &tuna, data); + } + if (ret) + goto out; + useraddr += sizeof(tuna); + ret = -EFAULT; + if (copy_to_user(useraddr, data, tuna.len)) + goto out; + ret = 0; + +out: + kfree(data); + return ret; +} + +static int set_phy_tunable(struct net_device *dev, void __user *useraddr) +{ + struct phy_device *phydev = dev->phydev; + struct ethtool_tunable tuna; + bool phy_drv_tunable; + void *data; + int ret; + + phy_drv_tunable = phydev && phydev->drv && phydev->drv->get_tunable; + if (!phy_drv_tunable && !dev->ethtool_ops->set_phy_tunable) + return -EOPNOTSUPP; + if (copy_from_user(&tuna, useraddr, sizeof(tuna))) + return -EFAULT; + ret = ethtool_phy_tunable_valid(&tuna); + if (ret) + return ret; + useraddr += sizeof(tuna); + data = memdup_user(useraddr, tuna.len); + if (IS_ERR(data)) + return PTR_ERR(data); + if (phy_drv_tunable) { + mutex_lock(&phydev->lock); + ret = phydev->drv->set_tunable(phydev, &tuna, data); + mutex_unlock(&phydev->lock); + } else { + ret = dev->ethtool_ops->set_phy_tunable(dev, &tuna, data); + } + + kfree(data); + return ret; +} + +static int ethtool_get_fecparam(struct net_device *dev, void __user *useraddr) +{ + struct ethtool_fecparam fecparam = { .cmd = ETHTOOL_GFECPARAM }; + int rc; + + if (!dev->ethtool_ops->get_fecparam) + return -EOPNOTSUPP; + + rc = dev->ethtool_ops->get_fecparam(dev, &fecparam); + if (rc) + return rc; + + if (WARN_ON_ONCE(fecparam.reserved)) + fecparam.reserved = 0; + + if (copy_to_user(useraddr, &fecparam, sizeof(fecparam))) + return -EFAULT; + return 0; +} + +static int ethtool_set_fecparam(struct net_device *dev, void __user *useraddr) +{ + struct ethtool_fecparam fecparam; + + if (!dev->ethtool_ops->set_fecparam) + return -EOPNOTSUPP; + + if (copy_from_user(&fecparam, useraddr, sizeof(fecparam))) + return -EFAULT; + + if (!fecparam.fec || fecparam.fec & ETHTOOL_FEC_NONE) + return -EINVAL; + + fecparam.active_fec = 0; + fecparam.reserved = 0; + + return dev->ethtool_ops->set_fecparam(dev, &fecparam); +} + +/* The main entry point in this file. 
Called from net/core/dev_ioctl.c */ + +static int +__dev_ethtool(struct net *net, struct ifreq *ifr, void __user *useraddr, + u32 ethcmd, struct ethtool_devlink_compat *devlink_state) +{ + struct net_device *dev; + u32 sub_cmd; + int rc; + netdev_features_t old_features; + + dev = __dev_get_by_name(net, ifr->ifr_name); + if (!dev) + return -ENODEV; + + if (ethcmd == ETHTOOL_PERQUEUE) { + if (copy_from_user(&sub_cmd, useraddr + sizeof(ethcmd), sizeof(sub_cmd))) + return -EFAULT; + } else { + sub_cmd = ethcmd; + } + /* Allow some commands to be done by anyone */ + switch (sub_cmd) { + case ETHTOOL_GSET: + case ETHTOOL_GDRVINFO: + case ETHTOOL_GMSGLVL: + case ETHTOOL_GLINK: + case ETHTOOL_GCOALESCE: + case ETHTOOL_GRINGPARAM: + case ETHTOOL_GPAUSEPARAM: + case ETHTOOL_GRXCSUM: + case ETHTOOL_GTXCSUM: + case ETHTOOL_GSG: + case ETHTOOL_GSSET_INFO: + case ETHTOOL_GSTRINGS: + case ETHTOOL_GSTATS: + case ETHTOOL_GPHYSTATS: + case ETHTOOL_GTSO: + case ETHTOOL_GPERMADDR: + case ETHTOOL_GUFO: + case ETHTOOL_GGSO: + case ETHTOOL_GGRO: + case ETHTOOL_GFLAGS: + case ETHTOOL_GPFLAGS: + case ETHTOOL_GRXFH: + case ETHTOOL_GRXRINGS: + case ETHTOOL_GRXCLSRLCNT: + case ETHTOOL_GRXCLSRULE: + case ETHTOOL_GRXCLSRLALL: + case ETHTOOL_GRXFHINDIR: + case ETHTOOL_GRSSH: + case ETHTOOL_GFEATURES: + case ETHTOOL_GCHANNELS: + case ETHTOOL_GET_TS_INFO: + case ETHTOOL_GEEE: + case ETHTOOL_GTUNABLE: + case ETHTOOL_PHY_GTUNABLE: + case ETHTOOL_GLINKSETTINGS: + case ETHTOOL_GFECPARAM: + break; + default: + if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) + return -EPERM; + } + + netdev_lock_ops(dev); + if (dev->dev.parent) + pm_runtime_get_sync(dev->dev.parent); + + if (!netif_device_present(dev)) { + rc = -ENODEV; + goto out; + } + + if (dev->ethtool_ops->begin) { + rc = dev->ethtool_ops->begin(dev); + if (rc < 0) + goto out; + } + old_features = dev->features; + + switch (ethcmd) { + case ETHTOOL_GSET: + rc = ethtool_get_settings(dev, useraddr); + break; + case ETHTOOL_SSET: + rc = ethtool_set_settings(dev, useraddr); + break; + case ETHTOOL_GDRVINFO: + rc = ethtool_get_drvinfo(dev, devlink_state); + break; + case ETHTOOL_GREGS: + rc = ethtool_get_regs(dev, useraddr); + break; + case ETHTOOL_GWOL: + rc = ethtool_get_wol(dev, useraddr); + break; + case ETHTOOL_SWOL: + rc = ethtool_set_wol(dev, useraddr); + break; + case ETHTOOL_GMSGLVL: + rc = ethtool_get_value(dev, useraddr, ethcmd, + dev->ethtool_ops->get_msglevel); + break; + case ETHTOOL_SMSGLVL: + rc = ethtool_set_value_void(dev, useraddr, + dev->ethtool_ops->set_msglevel); + if (!rc) + ethtool_notify(dev, ETHTOOL_MSG_DEBUG_NTF); + break; + case ETHTOOL_GEEE: + rc = ethtool_get_eee(dev, useraddr); + break; + case ETHTOOL_SEEE: + rc = ethtool_set_eee(dev, useraddr); + break; + case ETHTOOL_NWAY_RST: + rc = ethtool_nway_reset(dev); + break; + case ETHTOOL_GLINK: + rc = ethtool_get_link(dev, useraddr); + break; + case ETHTOOL_GEEPROM: + rc = ethtool_get_eeprom(dev, useraddr); + break; + case ETHTOOL_SEEPROM: + rc = ethtool_set_eeprom(dev, useraddr); + break; + case ETHTOOL_GCOALESCE: + rc = ethtool_get_coalesce(dev, useraddr); + break; + case ETHTOOL_SCOALESCE: + rc = ethtool_set_coalesce(dev, useraddr); + break; + case ETHTOOL_GRINGPARAM: + rc = ethtool_get_ringparam(dev, useraddr); + break; + case ETHTOOL_SRINGPARAM: + rc = ethtool_set_ringparam(dev, useraddr); + break; + case ETHTOOL_GPAUSEPARAM: + rc = ethtool_get_pauseparam(dev, useraddr); + break; + case ETHTOOL_SPAUSEPARAM: + rc = ethtool_set_pauseparam(dev, useraddr); + break; + case ETHTOOL_TEST: + rc = 
ethtool_self_test(dev, useraddr); + break; + case ETHTOOL_GSTRINGS: + rc = ethtool_get_strings(dev, useraddr); + break; + case ETHTOOL_PHYS_ID: + rc = ethtool_phys_id(dev, useraddr); + break; + case ETHTOOL_GSTATS: + rc = ethtool_get_stats(dev, useraddr); + break; + case ETHTOOL_GPERMADDR: + rc = ethtool_get_perm_addr(dev, useraddr); + break; + case ETHTOOL_GFLAGS: + rc = ethtool_get_value(dev, useraddr, ethcmd, + __ethtool_get_flags); + break; + case ETHTOOL_SFLAGS: + rc = ethtool_set_value(dev, useraddr, __ethtool_set_flags); + break; + case ETHTOOL_GPFLAGS: + rc = ethtool_get_value(dev, useraddr, ethcmd, + dev->ethtool_ops->get_priv_flags); + break; + case ETHTOOL_SPFLAGS: + rc = ethtool_set_value(dev, useraddr, + dev->ethtool_ops->set_priv_flags); + if (!rc) + ethtool_notify(dev, ETHTOOL_MSG_PRIVFLAGS_NTF); + break; + case ETHTOOL_GRXFH: + rc = ethtool_get_rxfh_fields(dev, ethcmd, useraddr); + break; + case ETHTOOL_SRXFH: + rc = ethtool_set_rxfh_fields(dev, ethcmd, useraddr); + break; + case ETHTOOL_GRXRINGS: + rc = ethtool_get_rxrings(dev, ethcmd, useraddr); + break; + case ETHTOOL_GRXCLSRLCNT: + case ETHTOOL_GRXCLSRULE: + case ETHTOOL_GRXCLSRLALL: + rc = ethtool_get_rxnfc(dev, ethcmd, useraddr); + break; + case ETHTOOL_SRXCLSRLDEL: + case ETHTOOL_SRXCLSRLINS: + rc = ethtool_set_rxnfc(dev, ethcmd, useraddr); + break; + case ETHTOOL_FLASHDEV: + rc = ethtool_flash_device(dev, devlink_state); + break; + case ETHTOOL_RESET: + rc = ethtool_reset(dev, useraddr); + break; + case ETHTOOL_GSSET_INFO: + rc = ethtool_get_sset_info(dev, useraddr); + break; + case ETHTOOL_GRXFHINDIR: + rc = ethtool_get_rxfh_indir(dev, useraddr); + break; + case ETHTOOL_SRXFHINDIR: + rc = ethtool_set_rxfh_indir(dev, useraddr); + break; + case ETHTOOL_GRSSH: + rc = ethtool_get_rxfh(dev, useraddr); + break; + case ETHTOOL_SRSSH: + rc = ethtool_set_rxfh(dev, useraddr); + break; + case ETHTOOL_GFEATURES: + rc = ethtool_get_features(dev, useraddr); + break; + case ETHTOOL_SFEATURES: + rc = ethtool_set_features(dev, useraddr); + break; + case ETHTOOL_GTXCSUM: + case ETHTOOL_GRXCSUM: + case ETHTOOL_GSG: + case ETHTOOL_GTSO: + case ETHTOOL_GGSO: + case ETHTOOL_GGRO: + rc = ethtool_get_one_feature(dev, useraddr, ethcmd); + break; + case ETHTOOL_STXCSUM: + case ETHTOOL_SRXCSUM: + case ETHTOOL_SSG: + case ETHTOOL_STSO: + case ETHTOOL_SGSO: + case ETHTOOL_SGRO: + rc = ethtool_set_one_feature(dev, useraddr, ethcmd); + break; + case ETHTOOL_GCHANNELS: + rc = ethtool_get_channels(dev, useraddr); + break; + case ETHTOOL_SCHANNELS: + rc = ethtool_set_channels(dev, useraddr); + break; + case ETHTOOL_SET_DUMP: + rc = ethtool_set_dump(dev, useraddr); + break; + case ETHTOOL_GET_DUMP_FLAG: + rc = ethtool_get_dump_flag(dev, useraddr); + break; + case ETHTOOL_GET_DUMP_DATA: + rc = ethtool_get_dump_data(dev, useraddr); + break; + case ETHTOOL_GET_TS_INFO: + rc = ethtool_get_ts_info(dev, useraddr); + break; + case ETHTOOL_GMODULEINFO: + rc = ethtool_get_module_info(dev, useraddr); + break; + case ETHTOOL_GMODULEEEPROM: + rc = ethtool_get_module_eeprom(dev, useraddr); + break; + case ETHTOOL_GTUNABLE: + rc = ethtool_get_tunable(dev, useraddr); + break; + case ETHTOOL_STUNABLE: + rc = ethtool_set_tunable(dev, useraddr); + break; + case ETHTOOL_GPHYSTATS: + rc = ethtool_get_phy_stats(dev, useraddr); + break; + case ETHTOOL_PERQUEUE: + rc = ethtool_set_per_queue(dev, useraddr, sub_cmd); + break; + case ETHTOOL_GLINKSETTINGS: + rc = ethtool_get_link_ksettings(dev, useraddr); + break; + case ETHTOOL_SLINKSETTINGS: + rc = 
ethtool_set_link_ksettings(dev, useraddr); + break; + case ETHTOOL_PHY_GTUNABLE: + rc = get_phy_tunable(dev, useraddr); + break; + case ETHTOOL_PHY_STUNABLE: + rc = set_phy_tunable(dev, useraddr); + break; + case ETHTOOL_GFECPARAM: + rc = ethtool_get_fecparam(dev, useraddr); + break; + case ETHTOOL_SFECPARAM: + rc = ethtool_set_fecparam(dev, useraddr); + break; + default: + rc = -EOPNOTSUPP; + } + + if (dev->ethtool_ops->complete) + dev->ethtool_ops->complete(dev); + + if (old_features != dev->features) + netdev_features_change(dev); +out: + if (dev->dev.parent) + pm_runtime_put(dev->dev.parent); + netdev_unlock_ops(dev); + + return rc; +} + +int dev_ethtool(struct net *net, struct ifreq *ifr, void __user *useraddr) +{ + struct ethtool_devlink_compat *state; + u32 ethcmd; + int rc; + + if (copy_from_user(&ethcmd, useraddr, sizeof(ethcmd))) + return -EFAULT; + + state = kzalloc(sizeof(*state), GFP_KERNEL); + if (!state) + return -ENOMEM; + + switch (ethcmd) { + case ETHTOOL_FLASHDEV: + if (copy_from_user(&state->efl, useraddr, sizeof(state->efl))) { + rc = -EFAULT; + goto exit_free; + } + state->efl.data[ETHTOOL_FLASH_MAX_FILENAME - 1] = 0; + break; + } + + rtnl_lock(); + rc = __dev_ethtool(net, ifr, useraddr, ethcmd, state); + rtnl_unlock(); + if (rc) + goto exit_free; + + switch (ethcmd) { + case ETHTOOL_FLASHDEV: + if (state->devlink) + rc = devlink_compat_flash_update(state->devlink, + state->efl.data); + break; + case ETHTOOL_GDRVINFO: + if (state->devlink) + devlink_compat_running_version(state->devlink, + state->info.fw_version, + sizeof(state->info.fw_version)); + if (copy_to_user(useraddr, &state->info, sizeof(state->info))) { + rc = -EFAULT; + goto exit_free; + } + break; + } + +exit_free: + if (state->devlink) + devlink_put(state->devlink); + kfree(state); + return rc; +} + +struct ethtool_rx_flow_key { + struct flow_dissector_key_basic basic; + union { + struct flow_dissector_key_ipv4_addrs ipv4; + struct flow_dissector_key_ipv6_addrs ipv6; + }; + struct flow_dissector_key_ports tp; + struct flow_dissector_key_ip ip; + struct flow_dissector_key_vlan vlan; + struct flow_dissector_key_eth_addrs eth_addrs; +} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */ + +struct ethtool_rx_flow_match { + struct flow_dissector dissector; + struct ethtool_rx_flow_key key; + struct ethtool_rx_flow_key mask; +}; + +struct ethtool_rx_flow_rule * +ethtool_rx_flow_rule_create(const struct ethtool_rx_flow_spec_input *input) +{ + const struct ethtool_rx_flow_spec *fs = input->fs; + struct ethtool_rx_flow_match *match; + struct ethtool_rx_flow_rule *flow; + struct flow_action_entry *act; + + flow = kzalloc(sizeof(struct ethtool_rx_flow_rule) + + sizeof(struct ethtool_rx_flow_match), GFP_KERNEL); + if (!flow) + return ERR_PTR(-ENOMEM); + + /* ethtool_rx supports only one single action per rule. 
*/ + flow->rule = flow_rule_alloc(1); + if (!flow->rule) { + kfree(flow); + return ERR_PTR(-ENOMEM); + } + + match = (struct ethtool_rx_flow_match *)flow->priv; + flow->rule->match.dissector = &match->dissector; + flow->rule->match.mask = &match->mask; + flow->rule->match.key = &match->key; + + match->mask.basic.n_proto = htons(0xffff); + + switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS)) { + case ETHER_FLOW: { + const struct ethhdr *ether_spec, *ether_m_spec; + + ether_spec = &fs->h_u.ether_spec; + ether_m_spec = &fs->m_u.ether_spec; + + if (!is_zero_ether_addr(ether_m_spec->h_source)) { + ether_addr_copy(match->key.eth_addrs.src, + ether_spec->h_source); + ether_addr_copy(match->mask.eth_addrs.src, + ether_m_spec->h_source); + } + if (!is_zero_ether_addr(ether_m_spec->h_dest)) { + ether_addr_copy(match->key.eth_addrs.dst, + ether_spec->h_dest); + ether_addr_copy(match->mask.eth_addrs.dst, + ether_m_spec->h_dest); + } + if (ether_m_spec->h_proto) { + match->key.basic.n_proto = ether_spec->h_proto; + match->mask.basic.n_proto = ether_m_spec->h_proto; + } + } + break; + case TCP_V4_FLOW: + case UDP_V4_FLOW: { + const struct ethtool_tcpip4_spec *v4_spec, *v4_m_spec; + + match->key.basic.n_proto = htons(ETH_P_IP); + + v4_spec = &fs->h_u.tcp_ip4_spec; + v4_m_spec = &fs->m_u.tcp_ip4_spec; + + if (v4_m_spec->ip4src) { + match->key.ipv4.src = v4_spec->ip4src; + match->mask.ipv4.src = v4_m_spec->ip4src; + } + if (v4_m_spec->ip4dst) { + match->key.ipv4.dst = v4_spec->ip4dst; + match->mask.ipv4.dst = v4_m_spec->ip4dst; + } + if (v4_m_spec->ip4src || + v4_m_spec->ip4dst) { + match->dissector.used_keys |= + BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS); + match->dissector.offset[FLOW_DISSECTOR_KEY_IPV4_ADDRS] = + offsetof(struct ethtool_rx_flow_key, ipv4); + } + if (v4_m_spec->psrc) { + match->key.tp.src = v4_spec->psrc; + match->mask.tp.src = v4_m_spec->psrc; + } + if (v4_m_spec->pdst) { + match->key.tp.dst = v4_spec->pdst; + match->mask.tp.dst = v4_m_spec->pdst; + } + if (v4_m_spec->psrc || + v4_m_spec->pdst) { + match->dissector.used_keys |= + BIT_ULL(FLOW_DISSECTOR_KEY_PORTS); + match->dissector.offset[FLOW_DISSECTOR_KEY_PORTS] = + offsetof(struct ethtool_rx_flow_key, tp); + } + if (v4_m_spec->tos) { + match->key.ip.tos = v4_spec->tos; + match->mask.ip.tos = v4_m_spec->tos; + match->dissector.used_keys |= + BIT(FLOW_DISSECTOR_KEY_IP); + match->dissector.offset[FLOW_DISSECTOR_KEY_IP] = + offsetof(struct ethtool_rx_flow_key, ip); + } + } + break; + case TCP_V6_FLOW: + case UDP_V6_FLOW: { + const struct ethtool_tcpip6_spec *v6_spec, *v6_m_spec; + + match->key.basic.n_proto = htons(ETH_P_IPV6); + + v6_spec = &fs->h_u.tcp_ip6_spec; + v6_m_spec = &fs->m_u.tcp_ip6_spec; + if (!ipv6_addr_any((struct in6_addr *)v6_m_spec->ip6src)) { + memcpy(&match->key.ipv6.src, v6_spec->ip6src, + sizeof(match->key.ipv6.src)); + memcpy(&match->mask.ipv6.src, v6_m_spec->ip6src, + sizeof(match->mask.ipv6.src)); + } + if (!ipv6_addr_any((struct in6_addr *)v6_m_spec->ip6dst)) { + memcpy(&match->key.ipv6.dst, v6_spec->ip6dst, + sizeof(match->key.ipv6.dst)); + memcpy(&match->mask.ipv6.dst, v6_m_spec->ip6dst, + sizeof(match->mask.ipv6.dst)); + } + if (!ipv6_addr_any((struct in6_addr *)v6_m_spec->ip6src) || + !ipv6_addr_any((struct in6_addr *)v6_m_spec->ip6dst)) { + match->dissector.used_keys |= + BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS); + match->dissector.offset[FLOW_DISSECTOR_KEY_IPV6_ADDRS] = + offsetof(struct ethtool_rx_flow_key, ipv6); + } + if (v6_m_spec->psrc) { + match->key.tp.src = v6_spec->psrc; + 
match->mask.tp.src = v6_m_spec->psrc; + } + if (v6_m_spec->pdst) { + match->key.tp.dst = v6_spec->pdst; + match->mask.tp.dst = v6_m_spec->pdst; + } + if (v6_m_spec->psrc || + v6_m_spec->pdst) { + match->dissector.used_keys |= + BIT_ULL(FLOW_DISSECTOR_KEY_PORTS); + match->dissector.offset[FLOW_DISSECTOR_KEY_PORTS] = + offsetof(struct ethtool_rx_flow_key, tp); + } + if (v6_m_spec->tclass) { + match->key.ip.tos = v6_spec->tclass; + match->mask.ip.tos = v6_m_spec->tclass; + match->dissector.used_keys |= + BIT_ULL(FLOW_DISSECTOR_KEY_IP); + match->dissector.offset[FLOW_DISSECTOR_KEY_IP] = + offsetof(struct ethtool_rx_flow_key, ip); + } + } + break; + default: + ethtool_rx_flow_rule_destroy(flow); + return ERR_PTR(-EINVAL); + } + + switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS)) { + case TCP_V4_FLOW: + case TCP_V6_FLOW: + match->key.basic.ip_proto = IPPROTO_TCP; + match->mask.basic.ip_proto = 0xff; + break; + case UDP_V4_FLOW: + case UDP_V6_FLOW: + match->key.basic.ip_proto = IPPROTO_UDP; + match->mask.basic.ip_proto = 0xff; + break; + } + + match->dissector.used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_BASIC); + match->dissector.offset[FLOW_DISSECTOR_KEY_BASIC] = + offsetof(struct ethtool_rx_flow_key, basic); + + if (fs->flow_type & FLOW_EXT) { + const struct ethtool_flow_ext *ext_h_spec = &fs->h_ext; + const struct ethtool_flow_ext *ext_m_spec = &fs->m_ext; + + if (ext_m_spec->vlan_etype) { + match->key.vlan.vlan_tpid = ext_h_spec->vlan_etype; + match->mask.vlan.vlan_tpid = ext_m_spec->vlan_etype; + } + + if (ext_m_spec->vlan_tci) { + match->key.vlan.vlan_id = + ntohs(ext_h_spec->vlan_tci) & 0x0fff; + match->mask.vlan.vlan_id = + ntohs(ext_m_spec->vlan_tci) & 0x0fff; + + match->key.vlan.vlan_dei = + !!(ext_h_spec->vlan_tci & htons(0x1000)); + match->mask.vlan.vlan_dei = + !!(ext_m_spec->vlan_tci & htons(0x1000)); + + match->key.vlan.vlan_priority = + (ntohs(ext_h_spec->vlan_tci) & 0xe000) >> 13; + match->mask.vlan.vlan_priority = + (ntohs(ext_m_spec->vlan_tci) & 0xe000) >> 13; + } + + if (ext_m_spec->vlan_etype || + ext_m_spec->vlan_tci) { + match->dissector.used_keys |= + BIT_ULL(FLOW_DISSECTOR_KEY_VLAN); + match->dissector.offset[FLOW_DISSECTOR_KEY_VLAN] = + offsetof(struct ethtool_rx_flow_key, vlan); + } + } + if (fs->flow_type & FLOW_MAC_EXT) { + const struct ethtool_flow_ext *ext_h_spec = &fs->h_ext; + const struct ethtool_flow_ext *ext_m_spec = &fs->m_ext; + + memcpy(match->key.eth_addrs.dst, ext_h_spec->h_dest, + ETH_ALEN); + memcpy(match->mask.eth_addrs.dst, ext_m_spec->h_dest, + ETH_ALEN); + + match->dissector.used_keys |= + BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS); + match->dissector.offset[FLOW_DISSECTOR_KEY_ETH_ADDRS] = + offsetof(struct ethtool_rx_flow_key, eth_addrs); + } + + act = &flow->rule->action.entries[0]; + switch (fs->ring_cookie) { + case RX_CLS_FLOW_DISC: + act->id = FLOW_ACTION_DROP; + break; + case RX_CLS_FLOW_WAKE: + act->id = FLOW_ACTION_WAKE; + break; + default: + act->id = FLOW_ACTION_QUEUE; + if (fs->flow_type & FLOW_RSS) + act->queue.ctx = input->rss_ctx; + + act->queue.vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie); + act->queue.index = ethtool_get_flow_spec_ring(fs->ring_cookie); + break; + } + + return flow; +} +EXPORT_SYMBOL(ethtool_rx_flow_rule_create); + +void ethtool_rx_flow_rule_destroy(struct ethtool_rx_flow_rule *flow) +{ + kfree(flow->rule); + kfree(flow); +} +EXPORT_SYMBOL(ethtool_rx_flow_rule_destroy); diff --git a/net/ethtool/linkinfo.c b/net/ethtool/linkinfo.c new file mode 100644 index 000000000000..30b8ce275159 --- /dev/null +++ 
b/net/ethtool/linkinfo.c @@ -0,0 +1,145 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include "netlink.h" +#include "common.h" + +struct linkinfo_req_info { + struct ethnl_req_info base; +}; + +struct linkinfo_reply_data { + struct ethnl_reply_data base; + struct ethtool_link_ksettings ksettings; + struct ethtool_link_settings *lsettings; +}; + +#define LINKINFO_REPDATA(__reply_base) \ + container_of(__reply_base, struct linkinfo_reply_data, base) + +const struct nla_policy ethnl_linkinfo_get_policy[] = { + [ETHTOOL_A_LINKINFO_HEADER] = + NLA_POLICY_NESTED(ethnl_header_policy), +}; + +static int linkinfo_prepare_data(const struct ethnl_req_info *req_base, + struct ethnl_reply_data *reply_base, + const struct genl_info *info) +{ + struct linkinfo_reply_data *data = LINKINFO_REPDATA(reply_base); + struct net_device *dev = reply_base->dev; + int ret; + + data->lsettings = &data->ksettings.base; + + ret = ethnl_ops_begin(dev); + if (ret < 0) + return ret; + ret = __ethtool_get_link_ksettings(dev, &data->ksettings); + if (ret < 0) + GENL_SET_ERR_MSG(info, "failed to retrieve link settings"); + ethnl_ops_complete(dev); + + return ret; +} + +static int linkinfo_reply_size(const struct ethnl_req_info *req_base, + const struct ethnl_reply_data *reply_base) +{ + return nla_total_size(sizeof(u8)) /* LINKINFO_PORT */ + + nla_total_size(sizeof(u8)) /* LINKINFO_PHYADDR */ + + nla_total_size(sizeof(u8)) /* LINKINFO_TP_MDIX */ + + nla_total_size(sizeof(u8)) /* LINKINFO_TP_MDIX_CTRL */ + + nla_total_size(sizeof(u8)) /* LINKINFO_TRANSCEIVER */ + + 0; +} + +static int linkinfo_fill_reply(struct sk_buff *skb, + const struct ethnl_req_info *req_base, + const struct ethnl_reply_data *reply_base) +{ + const struct linkinfo_reply_data *data = LINKINFO_REPDATA(reply_base); + + if (nla_put_u8(skb, ETHTOOL_A_LINKINFO_PORT, data->lsettings->port) || + nla_put_u8(skb, ETHTOOL_A_LINKINFO_PHYADDR, + data->lsettings->phy_address) || + nla_put_u8(skb, ETHTOOL_A_LINKINFO_TP_MDIX, + data->lsettings->eth_tp_mdix) || + nla_put_u8(skb, ETHTOOL_A_LINKINFO_TP_MDIX_CTRL, + data->lsettings->eth_tp_mdix_ctrl) || + nla_put_u8(skb, ETHTOOL_A_LINKINFO_TRANSCEIVER, + data->lsettings->transceiver)) + return -EMSGSIZE; + + return 0; +} + +/* LINKINFO_SET */ + +const struct nla_policy ethnl_linkinfo_set_policy[] = { + [ETHTOOL_A_LINKINFO_HEADER] = + NLA_POLICY_NESTED(ethnl_header_policy), + [ETHTOOL_A_LINKINFO_PORT] = { .type = NLA_U8 }, + [ETHTOOL_A_LINKINFO_PHYADDR] = { .type = NLA_U8 }, + [ETHTOOL_A_LINKINFO_TP_MDIX_CTRL] = { .type = NLA_U8 }, +}; + +static int +ethnl_set_linkinfo_validate(struct ethnl_req_info *req_info, + struct genl_info *info) +{ + const struct ethtool_ops *ops = req_info->dev->ethtool_ops; + + if (!ops->get_link_ksettings || !ops->set_link_ksettings) + return -EOPNOTSUPP; + return 1; +} + +static int +ethnl_set_linkinfo(struct ethnl_req_info *req_info, struct genl_info *info) +{ + struct ethtool_link_ksettings ksettings = {}; + struct ethtool_link_settings *lsettings; + struct net_device *dev = req_info->dev; + struct nlattr **tb = info->attrs; + bool mod = false; + int ret; + + ret = __ethtool_get_link_ksettings(dev, &ksettings); + if (ret < 0) { + GENL_SET_ERR_MSG(info, "failed to retrieve link settings"); + return ret; + } + lsettings = &ksettings.base; + + ethnl_update_u8(&lsettings->port, tb[ETHTOOL_A_LINKINFO_PORT], &mod); + ethnl_update_u8(&lsettings->phy_address, tb[ETHTOOL_A_LINKINFO_PHYADDR], + &mod); + ethnl_update_u8(&lsettings->eth_tp_mdix_ctrl, + tb[ETHTOOL_A_LINKINFO_TP_MDIX_CTRL], &mod); + if 
(!mod) + return 0; + + ret = dev->ethtool_ops->set_link_ksettings(dev, &ksettings); + if (ret < 0) { + GENL_SET_ERR_MSG(info, "link settings update failed"); + return ret; + } + + return 1; +} + +const struct ethnl_request_ops ethnl_linkinfo_request_ops = { + .request_cmd = ETHTOOL_MSG_LINKINFO_GET, + .reply_cmd = ETHTOOL_MSG_LINKINFO_GET_REPLY, + .hdr_attr = ETHTOOL_A_LINKINFO_HEADER, + .req_info_size = sizeof(struct linkinfo_req_info), + .reply_data_size = sizeof(struct linkinfo_reply_data), + + .prepare_data = linkinfo_prepare_data, + .reply_size = linkinfo_reply_size, + .fill_reply = linkinfo_fill_reply, + + .set_validate = ethnl_set_linkinfo_validate, + .set = ethnl_set_linkinfo, + .set_ntf_cmd = ETHTOOL_MSG_LINKINFO_NTF, +}; diff --git a/net/ethtool/linkmodes.c b/net/ethtool/linkmodes.c new file mode 100644 index 000000000000..259cd9ef1f2a --- /dev/null +++ b/net/ethtool/linkmodes.c @@ -0,0 +1,362 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include "netlink.h" +#include "common.h" +#include "bitset.h" + +/* LINKMODES_GET */ + +struct linkmodes_req_info { + struct ethnl_req_info base; +}; + +struct linkmodes_reply_data { + struct ethnl_reply_data base; + struct ethtool_link_ksettings ksettings; + struct ethtool_link_settings *lsettings; + bool peer_empty; +}; + +#define LINKMODES_REPDATA(__reply_base) \ + container_of(__reply_base, struct linkmodes_reply_data, base) + +const struct nla_policy ethnl_linkmodes_get_policy[] = { + [ETHTOOL_A_LINKMODES_HEADER] = + NLA_POLICY_NESTED(ethnl_header_policy), +}; + +static int linkmodes_prepare_data(const struct ethnl_req_info *req_base, + struct ethnl_reply_data *reply_base, + const struct genl_info *info) +{ + struct linkmodes_reply_data *data = LINKMODES_REPDATA(reply_base); + struct net_device *dev = reply_base->dev; + int ret; + + data->lsettings = &data->ksettings.base; + + ret = ethnl_ops_begin(dev); + if (ret < 0) + return ret; + + ret = __ethtool_get_link_ksettings(dev, &data->ksettings); + if (ret < 0) { + GENL_SET_ERR_MSG(info, "failed to retrieve link settings"); + goto out; + } + + if (!dev->ethtool_ops->cap_link_lanes_supported) + data->ksettings.lanes = 0; + + data->peer_empty = + bitmap_empty(data->ksettings.link_modes.lp_advertising, + __ETHTOOL_LINK_MODE_MASK_NBITS); + +out: + ethnl_ops_complete(dev); + return ret; +} + +static int linkmodes_reply_size(const struct ethnl_req_info *req_base, + const struct ethnl_reply_data *reply_base) +{ + const struct linkmodes_reply_data *data = LINKMODES_REPDATA(reply_base); + const struct ethtool_link_ksettings *ksettings = &data->ksettings; + const struct ethtool_link_settings *lsettings = &ksettings->base; + bool compact = req_base->flags & ETHTOOL_FLAG_COMPACT_BITSETS; + int len, ret; + + len = nla_total_size(sizeof(u8)) /* LINKMODES_AUTONEG */ + + nla_total_size(sizeof(u32)) /* LINKMODES_SPEED */ + + nla_total_size(sizeof(u32)) /* LINKMODES_LANES */ + + nla_total_size(sizeof(u8)) /* LINKMODES_DUPLEX */ + + nla_total_size(sizeof(u8)) /* LINKMODES_RATE_MATCHING */ + + 0; + ret = ethnl_bitset_size(ksettings->link_modes.advertising, + ksettings->link_modes.supported, + __ETHTOOL_LINK_MODE_MASK_NBITS, + link_mode_names, compact); + if (ret < 0) + return ret; + len += ret; + if (!data->peer_empty) { + ret = ethnl_bitset_size(ksettings->link_modes.lp_advertising, + NULL, __ETHTOOL_LINK_MODE_MASK_NBITS, + link_mode_names, compact); + if (ret < 0) + return ret; + len += ret; + } + + if (lsettings->master_slave_cfg != MASTER_SLAVE_CFG_UNSUPPORTED) + len += nla_total_size(sizeof(u8)); + + if 
(lsettings->master_slave_state != MASTER_SLAVE_STATE_UNSUPPORTED) + len += nla_total_size(sizeof(u8)); + + return len; +} + +static int linkmodes_fill_reply(struct sk_buff *skb, + const struct ethnl_req_info *req_base, + const struct ethnl_reply_data *reply_base) +{ + const struct linkmodes_reply_data *data = LINKMODES_REPDATA(reply_base); + const struct ethtool_link_ksettings *ksettings = &data->ksettings; + const struct ethtool_link_settings *lsettings = &ksettings->base; + bool compact = req_base->flags & ETHTOOL_FLAG_COMPACT_BITSETS; + int ret; + + if (nla_put_u8(skb, ETHTOOL_A_LINKMODES_AUTONEG, lsettings->autoneg)) + return -EMSGSIZE; + + ret = ethnl_put_bitset(skb, ETHTOOL_A_LINKMODES_OURS, + ksettings->link_modes.advertising, + ksettings->link_modes.supported, + __ETHTOOL_LINK_MODE_MASK_NBITS, link_mode_names, + compact); + if (ret < 0) + return -EMSGSIZE; + if (!data->peer_empty) { + ret = ethnl_put_bitset(skb, ETHTOOL_A_LINKMODES_PEER, + ksettings->link_modes.lp_advertising, + NULL, __ETHTOOL_LINK_MODE_MASK_NBITS, + link_mode_names, compact); + if (ret < 0) + return -EMSGSIZE; + } + + if (nla_put_u32(skb, ETHTOOL_A_LINKMODES_SPEED, lsettings->speed) || + nla_put_u8(skb, ETHTOOL_A_LINKMODES_DUPLEX, lsettings->duplex)) + return -EMSGSIZE; + + if (ksettings->lanes && + nla_put_u32(skb, ETHTOOL_A_LINKMODES_LANES, ksettings->lanes)) + return -EMSGSIZE; + + if (lsettings->master_slave_cfg != MASTER_SLAVE_CFG_UNSUPPORTED && + nla_put_u8(skb, ETHTOOL_A_LINKMODES_MASTER_SLAVE_CFG, + lsettings->master_slave_cfg)) + return -EMSGSIZE; + + if (lsettings->master_slave_state != MASTER_SLAVE_STATE_UNSUPPORTED && + nla_put_u8(skb, ETHTOOL_A_LINKMODES_MASTER_SLAVE_STATE, + lsettings->master_slave_state)) + return -EMSGSIZE; + + if (nla_put_u8(skb, ETHTOOL_A_LINKMODES_RATE_MATCHING, + lsettings->rate_matching)) + return -EMSGSIZE; + + return 0; +} + +/* LINKMODES_SET */ + +const struct nla_policy ethnl_linkmodes_set_policy[] = { + [ETHTOOL_A_LINKMODES_HEADER] = + NLA_POLICY_NESTED(ethnl_header_policy), + [ETHTOOL_A_LINKMODES_AUTONEG] = { .type = NLA_U8 }, + [ETHTOOL_A_LINKMODES_OURS] = { .type = NLA_NESTED }, + [ETHTOOL_A_LINKMODES_SPEED] = { .type = NLA_U32 }, + [ETHTOOL_A_LINKMODES_DUPLEX] = { .type = NLA_U8 }, + [ETHTOOL_A_LINKMODES_MASTER_SLAVE_CFG] = { .type = NLA_U8 }, + [ETHTOOL_A_LINKMODES_LANES] = NLA_POLICY_RANGE(NLA_U32, 1, 8), +}; + +/* Set advertised link modes to all supported modes matching requested speed, + * lanes and duplex values. Called when autonegotiation is on, speed, lanes or + * duplex is requested but no link mode change. This is done in userspace with + * ioctl() interface, move it into kernel for netlink. + * Returns true if advertised modes bitmap was modified. 
+ */ +static bool ethnl_auto_linkmodes(struct ethtool_link_ksettings *ksettings, + bool req_speed, bool req_lanes, bool req_duplex) +{ + unsigned long *advertising = ksettings->link_modes.advertising; + unsigned long *supported = ksettings->link_modes.supported; + DECLARE_BITMAP(old_adv, __ETHTOOL_LINK_MODE_MASK_NBITS); + unsigned int i; + + bitmap_copy(old_adv, advertising, __ETHTOOL_LINK_MODE_MASK_NBITS); + + for (i = 0; i < __ETHTOOL_LINK_MODE_MASK_NBITS; i++) { + const struct link_mode_info *info = &link_mode_params[i]; + + if (info->speed == SPEED_UNKNOWN) + continue; + if (test_bit(i, supported) && + (!req_speed || info->speed == ksettings->base.speed) && + (!req_lanes || info->lanes == ksettings->lanes) && + (!req_duplex || info->duplex == ksettings->base.duplex)) + set_bit(i, advertising); + else + clear_bit(i, advertising); + } + + return !bitmap_equal(old_adv, advertising, + __ETHTOOL_LINK_MODE_MASK_NBITS); +} + +static bool ethnl_validate_master_slave_cfg(u8 cfg) +{ + switch (cfg) { + case MASTER_SLAVE_CFG_MASTER_PREFERRED: + case MASTER_SLAVE_CFG_SLAVE_PREFERRED: + case MASTER_SLAVE_CFG_MASTER_FORCE: + case MASTER_SLAVE_CFG_SLAVE_FORCE: + return true; + } + + return false; +} + +static int ethnl_check_linkmodes(struct genl_info *info, struct nlattr **tb) +{ + const struct nlattr *master_slave_cfg, *lanes_cfg; + + master_slave_cfg = tb[ETHTOOL_A_LINKMODES_MASTER_SLAVE_CFG]; + if (master_slave_cfg && + !ethnl_validate_master_slave_cfg(nla_get_u8(master_slave_cfg))) { + NL_SET_ERR_MSG_ATTR(info->extack, master_slave_cfg, + "master/slave value is invalid"); + return -EOPNOTSUPP; + } + + lanes_cfg = tb[ETHTOOL_A_LINKMODES_LANES]; + if (lanes_cfg && !is_power_of_2(nla_get_u32(lanes_cfg))) { + NL_SET_ERR_MSG_ATTR(info->extack, lanes_cfg, + "lanes value is invalid"); + return -EINVAL; + } + + return 0; +} + +static int ethnl_update_linkmodes(struct genl_info *info, struct nlattr **tb, + struct ethtool_link_ksettings *ksettings, + bool *mod, const struct net_device *dev) +{ + struct ethtool_link_settings *lsettings = &ksettings->base; + bool req_speed, req_lanes, req_duplex; + const struct nlattr *master_slave_cfg, *lanes_cfg; + int ret; + + master_slave_cfg = tb[ETHTOOL_A_LINKMODES_MASTER_SLAVE_CFG]; + if (master_slave_cfg) { + if (lsettings->master_slave_cfg == MASTER_SLAVE_CFG_UNSUPPORTED) { + NL_SET_ERR_MSG_ATTR(info->extack, master_slave_cfg, + "master/slave configuration not supported by device"); + return -EOPNOTSUPP; + } + } + + *mod = false; + req_speed = tb[ETHTOOL_A_LINKMODES_SPEED]; + req_lanes = tb[ETHTOOL_A_LINKMODES_LANES]; + req_duplex = tb[ETHTOOL_A_LINKMODES_DUPLEX]; + + ethnl_update_u8(&lsettings->autoneg, tb[ETHTOOL_A_LINKMODES_AUTONEG], + mod); + + lanes_cfg = tb[ETHTOOL_A_LINKMODES_LANES]; + if (lanes_cfg) { + /* If autoneg is off and lanes parameter is not supported by the + * driver, return an error. + */ + if (!lsettings->autoneg && + !dev->ethtool_ops->cap_link_lanes_supported) { + NL_SET_ERR_MSG_ATTR(info->extack, lanes_cfg, + "lanes configuration not supported by device"); + return -EOPNOTSUPP; + } + } else if (!lsettings->autoneg && ksettings->lanes) { + /* If autoneg is off and lanes parameter is not passed from user but + * it was defined previously then set the lanes parameter to 0. 
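+	 * (A lanes value of 0 leaves the lane count unspecified instead of
+	 * carrying over a previously requested value.)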
+ */ + ksettings->lanes = 0; + *mod = true; + } + + ret = ethnl_update_bitset(ksettings->link_modes.advertising, + __ETHTOOL_LINK_MODE_MASK_NBITS, + tb[ETHTOOL_A_LINKMODES_OURS], link_mode_names, + info->extack, mod); + if (ret < 0) + return ret; + ethnl_update_u32(&lsettings->speed, tb[ETHTOOL_A_LINKMODES_SPEED], + mod); + ethnl_update_u32(&ksettings->lanes, lanes_cfg, mod); + ethnl_update_u8(&lsettings->duplex, tb[ETHTOOL_A_LINKMODES_DUPLEX], + mod); + ethnl_update_u8(&lsettings->master_slave_cfg, master_slave_cfg, mod); + + if (!tb[ETHTOOL_A_LINKMODES_OURS] && lsettings->autoneg && + (req_speed || req_lanes || req_duplex) && + ethnl_auto_linkmodes(ksettings, req_speed, req_lanes, req_duplex)) + *mod = true; + + return 0; +} + +static int +ethnl_set_linkmodes_validate(struct ethnl_req_info *req_info, + struct genl_info *info) +{ + const struct ethtool_ops *ops = req_info->dev->ethtool_ops; + int ret; + + ret = ethnl_check_linkmodes(info, info->attrs); + if (ret < 0) + return ret; + + if (!ops->get_link_ksettings || !ops->set_link_ksettings) + return -EOPNOTSUPP; + return 1; +} + +static int +ethnl_set_linkmodes(struct ethnl_req_info *req_info, struct genl_info *info) +{ + struct ethtool_link_ksettings ksettings = {}; + struct net_device *dev = req_info->dev; + struct nlattr **tb = info->attrs; + bool mod = false; + int ret; + + ret = __ethtool_get_link_ksettings(dev, &ksettings); + if (ret < 0) { + GENL_SET_ERR_MSG(info, "failed to retrieve link settings"); + return ret; + } + + ret = ethnl_update_linkmodes(info, tb, &ksettings, &mod, dev); + if (ret < 0) + return ret; + if (!mod) + return 0; + + ret = dev->ethtool_ops->set_link_ksettings(dev, &ksettings); + if (ret < 0) { + GENL_SET_ERR_MSG(info, "link settings update failed"); + return ret; + } + + return 1; +} + +const struct ethnl_request_ops ethnl_linkmodes_request_ops = { + .request_cmd = ETHTOOL_MSG_LINKMODES_GET, + .reply_cmd = ETHTOOL_MSG_LINKMODES_GET_REPLY, + .hdr_attr = ETHTOOL_A_LINKMODES_HEADER, + .req_info_size = sizeof(struct linkmodes_req_info), + .reply_data_size = sizeof(struct linkmodes_reply_data), + + .prepare_data = linkmodes_prepare_data, + .reply_size = linkmodes_reply_size, + .fill_reply = linkmodes_fill_reply, + + .set_validate = ethnl_set_linkmodes_validate, + .set = ethnl_set_linkmodes, + .set_ntf_cmd = ETHTOOL_MSG_LINKMODES_NTF, +}; diff --git a/net/ethtool/linkstate.c b/net/ethtool/linkstate.c new file mode 100644 index 000000000000..05a5f72c99fa --- /dev/null +++ b/net/ethtool/linkstate.c @@ -0,0 +1,227 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include "netlink.h" +#include "common.h" +#include <linux/phy.h> +#include <linux/phylib_stubs.h> + +struct linkstate_req_info { + struct ethnl_req_info base; +}; + +struct linkstate_reply_data { + struct ethnl_reply_data base; + int link; + int sqi; + int sqi_max; + struct ethtool_link_ext_stats link_stats; + bool link_ext_state_provided; + struct ethtool_link_ext_state_info ethtool_link_ext_state_info; +}; + +#define LINKSTATE_REPDATA(__reply_base) \ + container_of(__reply_base, struct linkstate_reply_data, base) + +const struct nla_policy ethnl_linkstate_get_policy[] = { + [ETHTOOL_A_LINKSTATE_HEADER] = + NLA_POLICY_NESTED(ethnl_header_policy_stats), +}; + +static int linkstate_get_sqi(struct phy_device *phydev) +{ + int ret; + + if (!phydev) + return -EOPNOTSUPP; + + mutex_lock(&phydev->lock); + if (!phydev->drv || !phydev->drv->get_sqi) + ret = -EOPNOTSUPP; + else if (!phydev->link) + ret = -ENETDOWN; + else + ret = phydev->drv->get_sqi(phydev); + 
mutex_unlock(&phydev->lock); + + return ret; +} + +static int linkstate_get_sqi_max(struct phy_device *phydev) +{ + int ret; + + if (!phydev) + return -EOPNOTSUPP; + + mutex_lock(&phydev->lock); + if (!phydev->drv || !phydev->drv->get_sqi_max) + ret = -EOPNOTSUPP; + else if (!phydev->link) + ret = -ENETDOWN; + else + ret = phydev->drv->get_sqi_max(phydev); + mutex_unlock(&phydev->lock); + + return ret; +}; + +static bool linkstate_sqi_critical_error(int sqi) +{ + return sqi < 0 && sqi != -EOPNOTSUPP && sqi != -ENETDOWN; +} + +static bool linkstate_sqi_valid(struct linkstate_reply_data *data) +{ + return data->sqi >= 0 && data->sqi_max >= 0 && + data->sqi <= data->sqi_max; +} + +static int linkstate_get_link_ext_state(struct net_device *dev, + struct linkstate_reply_data *data) +{ + int err; + + if (!dev->ethtool_ops->get_link_ext_state) + return -EOPNOTSUPP; + + err = dev->ethtool_ops->get_link_ext_state(dev, &data->ethtool_link_ext_state_info); + if (err) + return err; + + data->link_ext_state_provided = true; + + return 0; +} + +static int linkstate_prepare_data(const struct ethnl_req_info *req_base, + struct ethnl_reply_data *reply_base, + const struct genl_info *info) +{ + struct linkstate_reply_data *data = LINKSTATE_REPDATA(reply_base); + struct net_device *dev = reply_base->dev; + struct nlattr **tb = info->attrs; + struct phy_device *phydev; + int ret; + + phydev = ethnl_req_get_phydev(req_base, tb, ETHTOOL_A_LINKSTATE_HEADER, + info->extack); + if (IS_ERR(phydev)) { + ret = PTR_ERR(phydev); + goto out; + } + + ret = ethnl_ops_begin(dev); + if (ret < 0) + return ret; + data->link = __ethtool_get_link(dev); + + ret = linkstate_get_sqi(phydev); + if (linkstate_sqi_critical_error(ret)) + goto out; + data->sqi = ret; + + ret = linkstate_get_sqi_max(phydev); + if (linkstate_sqi_critical_error(ret)) + goto out; + data->sqi_max = ret; + + if (dev->flags & IFF_UP) { + ret = linkstate_get_link_ext_state(dev, data); + if (ret < 0 && ret != -EOPNOTSUPP && ret != -ENODATA) + goto out; + } + + ethtool_stats_init((u64 *)&data->link_stats, + sizeof(data->link_stats) / 8); + + if (req_base->flags & ETHTOOL_FLAG_STATS) { + if (phydev) + phy_ethtool_get_link_ext_stats(phydev, + &data->link_stats); + + if (dev->ethtool_ops->get_link_ext_stats) + dev->ethtool_ops->get_link_ext_stats(dev, + &data->link_stats); + } + + ret = 0; +out: + ethnl_ops_complete(dev); + return ret; +} + +static int linkstate_reply_size(const struct ethnl_req_info *req_base, + const struct ethnl_reply_data *reply_base) +{ + struct linkstate_reply_data *data = LINKSTATE_REPDATA(reply_base); + int len; + + len = nla_total_size(sizeof(u8)) /* LINKSTATE_LINK */ + + 0; + + if (linkstate_sqi_valid(data)) { + len += nla_total_size(sizeof(u32)); /* LINKSTATE_SQI */ + len += nla_total_size(sizeof(u32)); /* LINKSTATE_SQI_MAX */ + } + + if (data->link_ext_state_provided) + len += nla_total_size(sizeof(u8)); /* LINKSTATE_EXT_STATE */ + + if (data->ethtool_link_ext_state_info.__link_ext_substate) + len += nla_total_size(sizeof(u8)); /* LINKSTATE_EXT_SUBSTATE */ + + if (data->link_stats.link_down_events != ETHTOOL_STAT_NOT_SET) + len += nla_total_size(sizeof(u32)); + + return len; +} + +static int linkstate_fill_reply(struct sk_buff *skb, + const struct ethnl_req_info *req_base, + const struct ethnl_reply_data *reply_base) +{ + struct linkstate_reply_data *data = LINKSTATE_REPDATA(reply_base); + + if (data->link >= 0 && + nla_put_u8(skb, ETHTOOL_A_LINKSTATE_LINK, !!data->link)) + return -EMSGSIZE; + + if (linkstate_sqi_valid(data)) { + if 
(nla_put_u32(skb, ETHTOOL_A_LINKSTATE_SQI, data->sqi)) + return -EMSGSIZE; + + if (nla_put_u32(skb, ETHTOOL_A_LINKSTATE_SQI_MAX, + data->sqi_max)) + return -EMSGSIZE; + } + + if (data->link_ext_state_provided) { + if (nla_put_u8(skb, ETHTOOL_A_LINKSTATE_EXT_STATE, + data->ethtool_link_ext_state_info.link_ext_state)) + return -EMSGSIZE; + + if (data->ethtool_link_ext_state_info.__link_ext_substate && + nla_put_u8(skb, ETHTOOL_A_LINKSTATE_EXT_SUBSTATE, + data->ethtool_link_ext_state_info.__link_ext_substate)) + return -EMSGSIZE; + } + + if (data->link_stats.link_down_events != ETHTOOL_STAT_NOT_SET) + if (nla_put_u32(skb, ETHTOOL_A_LINKSTATE_EXT_DOWN_CNT, + data->link_stats.link_down_events)) + return -EMSGSIZE; + + return 0; +} + +const struct ethnl_request_ops ethnl_linkstate_request_ops = { + .request_cmd = ETHTOOL_MSG_LINKSTATE_GET, + .reply_cmd = ETHTOOL_MSG_LINKSTATE_GET_REPLY, + .hdr_attr = ETHTOOL_A_LINKSTATE_HEADER, + .req_info_size = sizeof(struct linkstate_req_info), + .reply_data_size = sizeof(struct linkstate_reply_data), + + .prepare_data = linkstate_prepare_data, + .reply_size = linkstate_reply_size, + .fill_reply = linkstate_fill_reply, +}; diff --git a/net/ethtool/mm.c b/net/ethtool/mm.c new file mode 100644 index 000000000000..29bbbc149375 --- /dev/null +++ b/net/ethtool/mm.c @@ -0,0 +1,561 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright 2022-2025 NXP + * Copyright 2024 Furong Xu <0x1207@gmail.com> + */ +#include "common.h" +#include "netlink.h" + +struct mm_req_info { + struct ethnl_req_info base; +}; + +struct mm_reply_data { + struct ethnl_reply_data base; + struct ethtool_mm_state state; + struct ethtool_mm_stats stats; +}; + +#define MM_REPDATA(__reply_base) \ + container_of(__reply_base, struct mm_reply_data, base) + +#define ETHTOOL_MM_STAT_CNT \ + (__ETHTOOL_A_MM_STAT_CNT - (ETHTOOL_A_MM_STAT_PAD + 1)) + +const struct nla_policy ethnl_mm_get_policy[ETHTOOL_A_MM_HEADER + 1] = { + [ETHTOOL_A_MM_HEADER] = NLA_POLICY_NESTED(ethnl_header_policy_stats), +}; + +static int mm_prepare_data(const struct ethnl_req_info *req_base, + struct ethnl_reply_data *reply_base, + const struct genl_info *info) +{ + struct mm_reply_data *data = MM_REPDATA(reply_base); + struct net_device *dev = reply_base->dev; + const struct ethtool_ops *ops; + int ret; + + ops = dev->ethtool_ops; + + if (!ops->get_mm) + return -EOPNOTSUPP; + + ethtool_stats_init((u64 *)&data->stats, + sizeof(data->stats) / sizeof(u64)); + + ret = ethnl_ops_begin(dev); + if (ret < 0) + return ret; + + ret = ops->get_mm(dev, &data->state); + if (ret) + goto out_complete; + + if (ops->get_mm_stats && (req_base->flags & ETHTOOL_FLAG_STATS)) + ops->get_mm_stats(dev, &data->stats); + +out_complete: + ethnl_ops_complete(dev); + + return ret; +} + +static int mm_reply_size(const struct ethnl_req_info *req_base, + const struct ethnl_reply_data *reply_base) +{ + int len = 0; + + len += nla_total_size(sizeof(u8)); /* _MM_PMAC_ENABLED */ + len += nla_total_size(sizeof(u8)); /* _MM_TX_ENABLED */ + len += nla_total_size(sizeof(u8)); /* _MM_TX_ACTIVE */ + len += nla_total_size(sizeof(u8)); /* _MM_VERIFY_ENABLED */ + len += nla_total_size(sizeof(u8)); /* _MM_VERIFY_STATUS */ + len += nla_total_size(sizeof(u32)); /* _MM_VERIFY_TIME */ + len += nla_total_size(sizeof(u32)); /* _MM_MAX_VERIFY_TIME */ + len += nla_total_size(sizeof(u32)); /* _MM_TX_MIN_FRAG_SIZE */ + len += nla_total_size(sizeof(u32)); /* _MM_RX_MIN_FRAG_SIZE */ + + if (req_base->flags & ETHTOOL_FLAG_STATS) + len += nla_total_size(0) + /* _MM_STATS */ + 
nla_total_size_64bit(sizeof(u64)) * ETHTOOL_MM_STAT_CNT; + + return len; +} + +static int mm_put_stat(struct sk_buff *skb, u64 val, u16 attrtype) +{ + if (val == ETHTOOL_STAT_NOT_SET) + return 0; + if (nla_put_u64_64bit(skb, attrtype, val, ETHTOOL_A_MM_STAT_PAD)) + return -EMSGSIZE; + return 0; +} + +static int mm_put_stats(struct sk_buff *skb, + const struct ethtool_mm_stats *stats) +{ + struct nlattr *nest; + + nest = nla_nest_start(skb, ETHTOOL_A_MM_STATS); + if (!nest) + return -EMSGSIZE; + + if (mm_put_stat(skb, stats->MACMergeFrameAssErrorCount, + ETHTOOL_A_MM_STAT_REASSEMBLY_ERRORS) || + mm_put_stat(skb, stats->MACMergeFrameSmdErrorCount, + ETHTOOL_A_MM_STAT_SMD_ERRORS) || + mm_put_stat(skb, stats->MACMergeFrameAssOkCount, + ETHTOOL_A_MM_STAT_REASSEMBLY_OK) || + mm_put_stat(skb, stats->MACMergeFragCountRx, + ETHTOOL_A_MM_STAT_RX_FRAG_COUNT) || + mm_put_stat(skb, stats->MACMergeFragCountTx, + ETHTOOL_A_MM_STAT_TX_FRAG_COUNT) || + mm_put_stat(skb, stats->MACMergeHoldCount, + ETHTOOL_A_MM_STAT_HOLD_COUNT)) + goto err_cancel; + + nla_nest_end(skb, nest); + return 0; + +err_cancel: + nla_nest_cancel(skb, nest); + return -EMSGSIZE; +} + +static int mm_fill_reply(struct sk_buff *skb, + const struct ethnl_req_info *req_base, + const struct ethnl_reply_data *reply_base) +{ + const struct mm_reply_data *data = MM_REPDATA(reply_base); + const struct ethtool_mm_state *state = &data->state; + + if (nla_put_u8(skb, ETHTOOL_A_MM_TX_ENABLED, state->tx_enabled) || + nla_put_u8(skb, ETHTOOL_A_MM_TX_ACTIVE, state->tx_active) || + nla_put_u8(skb, ETHTOOL_A_MM_PMAC_ENABLED, state->pmac_enabled) || + nla_put_u8(skb, ETHTOOL_A_MM_VERIFY_ENABLED, state->verify_enabled) || + nla_put_u8(skb, ETHTOOL_A_MM_VERIFY_STATUS, state->verify_status) || + nla_put_u32(skb, ETHTOOL_A_MM_VERIFY_TIME, state->verify_time) || + nla_put_u32(skb, ETHTOOL_A_MM_MAX_VERIFY_TIME, state->max_verify_time) || + nla_put_u32(skb, ETHTOOL_A_MM_TX_MIN_FRAG_SIZE, state->tx_min_frag_size) || + nla_put_u32(skb, ETHTOOL_A_MM_RX_MIN_FRAG_SIZE, state->rx_min_frag_size)) + return -EMSGSIZE; + + if (req_base->flags & ETHTOOL_FLAG_STATS && + mm_put_stats(skb, &data->stats)) + return -EMSGSIZE; + + return 0; +} + +const struct nla_policy ethnl_mm_set_policy[ETHTOOL_A_MM_MAX + 1] = { + [ETHTOOL_A_MM_HEADER] = NLA_POLICY_NESTED(ethnl_header_policy), + [ETHTOOL_A_MM_VERIFY_ENABLED] = NLA_POLICY_MAX(NLA_U8, 1), + [ETHTOOL_A_MM_VERIFY_TIME] = NLA_POLICY_RANGE(NLA_U32, 1, 128), + [ETHTOOL_A_MM_TX_ENABLED] = NLA_POLICY_MAX(NLA_U8, 1), + [ETHTOOL_A_MM_PMAC_ENABLED] = NLA_POLICY_MAX(NLA_U8, 1), + [ETHTOOL_A_MM_TX_MIN_FRAG_SIZE] = NLA_POLICY_RANGE(NLA_U32, 60, 252), +}; + +static void mm_state_to_cfg(const struct ethtool_mm_state *state, + struct ethtool_mm_cfg *cfg) +{ + /* We could also compare state->verify_status against + * ETHTOOL_MM_VERIFY_STATUS_DISABLED, but state->verify_enabled + * is more like an administrative state which should be seen in + * ETHTOOL_MSG_MM_GET replies. For example, a port with verification + * disabled might be in the ETHTOOL_MM_VERIFY_STATUS_INITIAL + * if it's down. + */ + cfg->verify_enabled = state->verify_enabled; + cfg->verify_time = state->verify_time; + cfg->tx_enabled = state->tx_enabled; + cfg->pmac_enabled = state->pmac_enabled; + cfg->tx_min_frag_size = state->tx_min_frag_size; +} + +static int +ethnl_set_mm_validate(struct ethnl_req_info *req_info, struct genl_info *info) +{ + const struct ethtool_ops *ops = req_info->dev->ethtool_ops; + + return ops->get_mm && ops->set_mm ? 
1 : -EOPNOTSUPP; +} + +static int ethnl_set_mm(struct ethnl_req_info *req_info, struct genl_info *info) +{ + struct netlink_ext_ack *extack = info->extack; + struct net_device *dev = req_info->dev; + struct ethtool_mm_state state = {}; + struct nlattr **tb = info->attrs; + struct ethtool_mm_cfg cfg = {}; + bool mod = false; + int ret; + + ret = dev->ethtool_ops->get_mm(dev, &state); + if (ret) + return ret; + + mm_state_to_cfg(&state, &cfg); + + ethnl_update_bool(&cfg.verify_enabled, tb[ETHTOOL_A_MM_VERIFY_ENABLED], + &mod); + ethnl_update_u32(&cfg.verify_time, tb[ETHTOOL_A_MM_VERIFY_TIME], &mod); + ethnl_update_bool(&cfg.tx_enabled, tb[ETHTOOL_A_MM_TX_ENABLED], &mod); + ethnl_update_bool(&cfg.pmac_enabled, tb[ETHTOOL_A_MM_PMAC_ENABLED], + &mod); + ethnl_update_u32(&cfg.tx_min_frag_size, + tb[ETHTOOL_A_MM_TX_MIN_FRAG_SIZE], &mod); + + if (!mod) + return 0; + + if (cfg.verify_time > state.max_verify_time) { + NL_SET_ERR_MSG_ATTR(extack, tb[ETHTOOL_A_MM_VERIFY_TIME], + "verifyTime exceeds device maximum"); + return -ERANGE; + } + + if (cfg.verify_enabled && !cfg.tx_enabled) { + NL_SET_ERR_MSG(extack, "Verification requires TX enabled"); + return -EINVAL; + } + + if (cfg.tx_enabled && !cfg.pmac_enabled) { + NL_SET_ERR_MSG(extack, "TX enabled requires pMAC enabled"); + return -EINVAL; + } + + ret = dev->ethtool_ops->set_mm(dev, &cfg, extack); + return ret < 0 ? ret : 1; +} + +const struct ethnl_request_ops ethnl_mm_request_ops = { + .request_cmd = ETHTOOL_MSG_MM_GET, + .reply_cmd = ETHTOOL_MSG_MM_GET_REPLY, + .hdr_attr = ETHTOOL_A_MM_HEADER, + .req_info_size = sizeof(struct mm_req_info), + .reply_data_size = sizeof(struct mm_reply_data), + + .prepare_data = mm_prepare_data, + .reply_size = mm_reply_size, + .fill_reply = mm_fill_reply, + + .set_validate = ethnl_set_mm_validate, + .set = ethnl_set_mm, + .set_ntf_cmd = ETHTOOL_MSG_MM_NTF, +}; + +/* Returns whether a given device supports the MAC merge layer + * (has an eMAC and a pMAC). Must be called under rtnl_lock() and + * ethnl_ops_begin(). + */ +bool __ethtool_dev_mm_supported(struct net_device *dev) +{ + const struct ethtool_ops *ops = dev->ethtool_ops; + struct ethtool_mm_state state = {}; + int ret = -EOPNOTSUPP; + + if (ops && ops->get_mm) + ret = ops->get_mm(dev, &state); + + return !ret; +} + +bool ethtool_dev_mm_supported(struct net_device *dev) +{ + const struct ethtool_ops *ops = dev->ethtool_ops; + bool supported; + int ret; + + ASSERT_RTNL(); + + if (!ops) + return false; + + ret = ethnl_ops_begin(dev); + if (ret < 0) + return false; + + supported = __ethtool_dev_mm_supported(dev); + + ethnl_ops_complete(dev); + + return supported; +} +EXPORT_SYMBOL_GPL(ethtool_dev_mm_supported); + +static void ethtool_mmsv_configure_tx(struct ethtool_mmsv *mmsv, + bool tx_active) +{ + if (mmsv->ops->configure_tx) + mmsv->ops->configure_tx(mmsv, tx_active); +} + +static void ethtool_mmsv_configure_pmac(struct ethtool_mmsv *mmsv, + bool pmac_enabled) +{ + if (mmsv->ops->configure_pmac) + mmsv->ops->configure_pmac(mmsv, pmac_enabled); +} + +static void ethtool_mmsv_send_mpacket(struct ethtool_mmsv *mmsv, + enum ethtool_mpacket mpacket) +{ + if (mmsv->ops->send_mpacket) + mmsv->ops->send_mpacket(mmsv, mpacket); +} + +/** + * ethtool_mmsv_verify_timer - Timer for MAC Merge verification + * @t: timer_list struct containing private info + * + * Verify the MAC Merge capability in the local TX direction, by + * transmitting Verify mPackets up to 3 times. Wait until link + * partner responds with a Response mPacket, otherwise fail. 
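+ *
+ * The retry budget comes from ETHTOOL_MM_MAX_VERIFY_RETRIES and the timer
+ * re-arms every verify_time milliseconds between attempts; once the link
+ * partner's response has been recorded (status SUCCEEDED), the next timer
+ * run enables TX preemption through the configure_tx() op.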
+ */ +static void ethtool_mmsv_verify_timer(struct timer_list *t) +{ + struct ethtool_mmsv *mmsv = timer_container_of(mmsv, t, verify_timer); + unsigned long flags; + bool rearm = false; + + spin_lock_irqsave(&mmsv->lock, flags); + + switch (mmsv->status) { + case ETHTOOL_MM_VERIFY_STATUS_INITIAL: + case ETHTOOL_MM_VERIFY_STATUS_VERIFYING: + if (mmsv->verify_retries != 0) { + ethtool_mmsv_send_mpacket(mmsv, ETHTOOL_MPACKET_VERIFY); + rearm = true; + } else { + mmsv->status = ETHTOOL_MM_VERIFY_STATUS_FAILED; + } + + mmsv->verify_retries--; + break; + + case ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED: + ethtool_mmsv_configure_tx(mmsv, true); + break; + + default: + break; + } + + if (rearm) { + mod_timer(&mmsv->verify_timer, + jiffies + msecs_to_jiffies(mmsv->verify_time)); + } + + spin_unlock_irqrestore(&mmsv->lock, flags); +} + +static void ethtool_mmsv_verify_timer_arm(struct ethtool_mmsv *mmsv) +{ + if (mmsv->pmac_enabled && mmsv->tx_enabled && mmsv->verify_enabled && + mmsv->status != ETHTOOL_MM_VERIFY_STATUS_FAILED && + mmsv->status != ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED) { + timer_setup(&mmsv->verify_timer, ethtool_mmsv_verify_timer, 0); + mod_timer(&mmsv->verify_timer, jiffies); + } +} + +static void ethtool_mmsv_apply(struct ethtool_mmsv *mmsv) +{ + /* If verification is disabled, configure FPE right away. + * Otherwise let the timer code do it. + */ + if (!mmsv->verify_enabled) { + ethtool_mmsv_configure_pmac(mmsv, mmsv->pmac_enabled); + ethtool_mmsv_configure_tx(mmsv, mmsv->tx_enabled); + } else { + mmsv->status = ETHTOOL_MM_VERIFY_STATUS_INITIAL; + mmsv->verify_retries = ETHTOOL_MM_MAX_VERIFY_RETRIES; + + if (netif_running(mmsv->dev)) + ethtool_mmsv_verify_timer_arm(mmsv); + } +} + +/** + * ethtool_mmsv_stop() - Stop MAC Merge Software Verification + * @mmsv: MAC Merge Software Verification state + * + * Drivers should call this method in a state where the hardware is + * about to lose state, like ndo_stop() or suspend(), and turning off + * MAC Merge features would be superfluous. Otherwise, prefer + * ethtool_mmsv_link_state_handle() with up=false. + */ +void ethtool_mmsv_stop(struct ethtool_mmsv *mmsv) +{ + timer_shutdown_sync(&mmsv->verify_timer); +} +EXPORT_SYMBOL_GPL(ethtool_mmsv_stop); + +/** + * ethtool_mmsv_link_state_handle() - Inform MAC Merge Software Verification + * of link state changes + * @mmsv: MAC Merge Software Verification state + * @up: True if device carrier is up and able to pass verification packets + * + * Calling context is expected to be from a task, interrupts enabled. 
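+ *
+ * A minimal usage sketch (foo_priv, its mmsv member and the function names
+ * are hypothetical, not part of this patch): a driver would typically call
+ * this from its link up/down callbacks, e.g.
+ *
+ *	static void foo_mac_link_up(struct foo_priv *priv)
+ *	{
+ *		ethtool_mmsv_link_state_handle(&priv->mmsv, true);
+ *	}
+ *
+ *	static void foo_mac_link_down(struct foo_priv *priv)
+ *	{
+ *		ethtool_mmsv_link_state_handle(&priv->mmsv, false);
+ *	}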
+ */ +void ethtool_mmsv_link_state_handle(struct ethtool_mmsv *mmsv, bool up) +{ + unsigned long flags; + + ethtool_mmsv_stop(mmsv); + + spin_lock_irqsave(&mmsv->lock, flags); + + if (up && mmsv->pmac_enabled) { + /* VERIFY process requires pMAC enabled when NIC comes up */ + ethtool_mmsv_configure_pmac(mmsv, true); + + /* New link => maybe new partner => new verification process */ + ethtool_mmsv_apply(mmsv); + } else { + /* Reset the reported verification state while the link is down */ + if (mmsv->verify_enabled) + mmsv->status = ETHTOOL_MM_VERIFY_STATUS_INITIAL; + + /* No link or pMAC not enabled */ + ethtool_mmsv_configure_pmac(mmsv, false); + ethtool_mmsv_configure_tx(mmsv, false); + } + + spin_unlock_irqrestore(&mmsv->lock, flags); +} +EXPORT_SYMBOL_GPL(ethtool_mmsv_link_state_handle); + +/** + * ethtool_mmsv_event_handle() - Inform MAC Merge Software Verification + * of interrupt-based events + * @mmsv: MAC Merge Software Verification state + * @event: Event which took place (packet transmission or reception) + * + * Calling context expects to have interrupts disabled. + */ +void ethtool_mmsv_event_handle(struct ethtool_mmsv *mmsv, + enum ethtool_mmsv_event event) +{ + /* This is interrupt context, just spin_lock() */ + spin_lock(&mmsv->lock); + + if (!mmsv->pmac_enabled) + goto unlock; + + switch (event) { + case ETHTOOL_MMSV_LP_SENT_VERIFY_MPACKET: + /* Link partner has sent verify mPacket */ + ethtool_mmsv_send_mpacket(mmsv, ETHTOOL_MPACKET_RESPONSE); + break; + case ETHTOOL_MMSV_LD_SENT_VERIFY_MPACKET: + /* Local device has sent verify mPacket */ + if (mmsv->status != ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED) + mmsv->status = ETHTOOL_MM_VERIFY_STATUS_VERIFYING; + break; + case ETHTOOL_MMSV_LP_SENT_RESPONSE_MPACKET: + /* Link partner has sent response mPacket */ + if (mmsv->status == ETHTOOL_MM_VERIFY_STATUS_VERIFYING) + mmsv->status = ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED; + break; + } + +unlock: + spin_unlock(&mmsv->lock); +} +EXPORT_SYMBOL_GPL(ethtool_mmsv_event_handle); + +static bool ethtool_mmsv_is_tx_active(struct ethtool_mmsv *mmsv) +{ + /* TX is active if administratively enabled, and verification either + * succeeded, or was administratively disabled. + */ + return mmsv->tx_enabled && + (mmsv->status == ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED || + mmsv->status == ETHTOOL_MM_VERIFY_STATUS_DISABLED); +} + +/** + * ethtool_mmsv_get_mm() - get_mm() hook for MAC Merge Software Verification + * @mmsv: MAC Merge Software Verification state + * @state: see struct ethtool_mm_state + * + * Drivers are expected to call this from their ethtool_ops :: get_mm() + * method. + */ +void ethtool_mmsv_get_mm(struct ethtool_mmsv *mmsv, + struct ethtool_mm_state *state) +{ + unsigned long flags; + + spin_lock_irqsave(&mmsv->lock, flags); + + state->max_verify_time = ETHTOOL_MM_MAX_VERIFY_TIME_MS; + state->verify_enabled = mmsv->verify_enabled; + state->pmac_enabled = mmsv->pmac_enabled; + state->verify_time = mmsv->verify_time; + state->tx_enabled = mmsv->tx_enabled; + state->verify_status = mmsv->status; + state->tx_active = ethtool_mmsv_is_tx_active(mmsv); + + spin_unlock_irqrestore(&mmsv->lock, flags); +} +EXPORT_SYMBOL_GPL(ethtool_mmsv_get_mm); + +/** + * ethtool_mmsv_set_mm() - set_mm() hook for MAC Merge Software Verification + * @mmsv: MAC Merge Software Verification state + * @cfg: see struct ethtool_mm_cfg + * + * Drivers are expected to call this from their ethtool_ops :: set_mm() + * method. 
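+ *
+ * A driver's set_mm() hook can usually just delegate to this helper and
+ * return 0; the hardware itself is reprogrammed through the ops passed to
+ * ethtool_mmsv_init(). A sketch with hypothetical driver names (foo_priv
+ * embeds a struct ethtool_mmsv as "mmsv"):
+ *
+ *	static int foo_set_mm(struct net_device *ndev, struct ethtool_mm_cfg *cfg,
+ *			      struct netlink_ext_ack *extack)
+ *	{
+ *		struct foo_priv *priv = netdev_priv(ndev);
+ *
+ *		ethtool_mmsv_set_mm(&priv->mmsv, cfg);
+ *		return 0;
+ *	}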
+ */ +void ethtool_mmsv_set_mm(struct ethtool_mmsv *mmsv, struct ethtool_mm_cfg *cfg) +{ + unsigned long flags; + + /* Wait for the verification that's currently in progress to finish */ + ethtool_mmsv_stop(mmsv); + + spin_lock_irqsave(&mmsv->lock, flags); + + mmsv->verify_enabled = cfg->verify_enabled; + mmsv->pmac_enabled = cfg->pmac_enabled; + mmsv->verify_time = cfg->verify_time; + mmsv->tx_enabled = cfg->tx_enabled; + + if (!cfg->verify_enabled) + mmsv->status = ETHTOOL_MM_VERIFY_STATUS_DISABLED; + + ethtool_mmsv_apply(mmsv); + + spin_unlock_irqrestore(&mmsv->lock, flags); +} +EXPORT_SYMBOL_GPL(ethtool_mmsv_set_mm); + +/** + * ethtool_mmsv_init() - Initialize MAC Merge Software Verification state + * @mmsv: MAC Merge Software Verification state + * @dev: Pointer to network interface + * @ops: Methods for implementing the generic functionality + * + * The MAC Merge Software Verification is a timer- and event-based state + * machine intended for network interfaces which lack a hardware-based + * TX verification process (as per IEEE 802.3 clause 99.4.3). The timer + * is managed by the core code, whereas events are supplied by the + * driver explicitly calling one of the other API functions. + */ +void ethtool_mmsv_init(struct ethtool_mmsv *mmsv, struct net_device *dev, + const struct ethtool_mmsv_ops *ops) +{ + mmsv->ops = ops; + mmsv->dev = dev; + mmsv->verify_retries = ETHTOOL_MM_MAX_VERIFY_RETRIES; + mmsv->verify_time = ETHTOOL_MM_MAX_VERIFY_TIME_MS; + mmsv->status = ETHTOOL_MM_VERIFY_STATUS_DISABLED; + timer_setup(&mmsv->verify_timer, ethtool_mmsv_verify_timer, 0); + spin_lock_init(&mmsv->lock); +} +EXPORT_SYMBOL_GPL(ethtool_mmsv_init); diff --git a/net/ethtool/module.c b/net/ethtool/module.c new file mode 100644 index 000000000000..4d4e0a82579a --- /dev/null +++ b/net/ethtool/module.c @@ -0,0 +1,557 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include <linux/ethtool.h> +#include <linux/firmware.h> +#include <linux/sfp.h> +#include <net/devlink.h> +#include <net/netdev_lock.h> + +#include "netlink.h" +#include "common.h" +#include "bitset.h" +#include "module_fw.h" + +struct module_req_info { + struct ethnl_req_info base; +}; + +struct module_reply_data { + struct ethnl_reply_data base; + struct ethtool_module_power_mode_params power; +}; + +#define MODULE_REPDATA(__reply_base) \ + container_of(__reply_base, struct module_reply_data, base) + +/* MODULE_GET */ + +const struct nla_policy ethnl_module_get_policy[ETHTOOL_A_MODULE_HEADER + 1] = { + [ETHTOOL_A_MODULE_HEADER] = NLA_POLICY_NESTED(ethnl_header_policy), +}; + +static int module_get_power_mode(struct net_device *dev, + struct module_reply_data *data, + struct netlink_ext_ack *extack) +{ + const struct ethtool_ops *ops = dev->ethtool_ops; + + if (!ops->get_module_power_mode) + return 0; + + if (dev->ethtool->module_fw_flash_in_progress) { + NL_SET_ERR_MSG(extack, + "Module firmware flashing is in progress"); + return -EBUSY; + } + + return ops->get_module_power_mode(dev, &data->power, extack); +} + +static int module_prepare_data(const struct ethnl_req_info *req_base, + struct ethnl_reply_data *reply_base, + const struct genl_info *info) +{ + struct module_reply_data *data = MODULE_REPDATA(reply_base); + struct net_device *dev = reply_base->dev; + int ret; + + ret = ethnl_ops_begin(dev); + if (ret < 0) + return ret; + + ret = module_get_power_mode(dev, data, info->extack); + if (ret < 0) + goto out_complete; + +out_complete: + ethnl_ops_complete(dev); + return ret; +} + +static int module_reply_size(const struct 
ethnl_req_info *req_base, + const struct ethnl_reply_data *reply_base) +{ + struct module_reply_data *data = MODULE_REPDATA(reply_base); + int len = 0; + + if (data->power.policy) + len += nla_total_size(sizeof(u8)); /* _MODULE_POWER_MODE_POLICY */ + + if (data->power.mode) + len += nla_total_size(sizeof(u8)); /* _MODULE_POWER_MODE */ + + return len; +} + +static int module_fill_reply(struct sk_buff *skb, + const struct ethnl_req_info *req_base, + const struct ethnl_reply_data *reply_base) +{ + const struct module_reply_data *data = MODULE_REPDATA(reply_base); + + if (data->power.policy && + nla_put_u8(skb, ETHTOOL_A_MODULE_POWER_MODE_POLICY, + data->power.policy)) + return -EMSGSIZE; + + if (data->power.mode && + nla_put_u8(skb, ETHTOOL_A_MODULE_POWER_MODE, data->power.mode)) + return -EMSGSIZE; + + return 0; +} + +/* MODULE_SET */ + +const struct nla_policy ethnl_module_set_policy[ETHTOOL_A_MODULE_POWER_MODE_POLICY + 1] = { + [ETHTOOL_A_MODULE_HEADER] = NLA_POLICY_NESTED(ethnl_header_policy), + [ETHTOOL_A_MODULE_POWER_MODE_POLICY] = + NLA_POLICY_RANGE(NLA_U8, ETHTOOL_MODULE_POWER_MODE_POLICY_HIGH, + ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO), +}; + +static int +ethnl_set_module_validate(struct ethnl_req_info *req_info, + struct genl_info *info) +{ + const struct ethtool_ops *ops = req_info->dev->ethtool_ops; + struct nlattr **tb = info->attrs; + + if (!tb[ETHTOOL_A_MODULE_POWER_MODE_POLICY]) + return 0; + + if (req_info->dev->ethtool->module_fw_flash_in_progress) { + NL_SET_ERR_MSG(info->extack, + "Module firmware flashing is in progress"); + return -EBUSY; + } + + if (!ops->get_module_power_mode || !ops->set_module_power_mode) { + NL_SET_ERR_MSG_ATTR(info->extack, + tb[ETHTOOL_A_MODULE_POWER_MODE_POLICY], + "Setting power mode policy is not supported by this device"); + return -EOPNOTSUPP; + } + + return 1; +} + +static int +ethnl_set_module(struct ethnl_req_info *req_info, struct genl_info *info) +{ + struct ethtool_module_power_mode_params power = {}; + struct ethtool_module_power_mode_params power_new; + const struct ethtool_ops *ops; + struct net_device *dev = req_info->dev; + struct nlattr **tb = info->attrs; + int ret; + + ops = dev->ethtool_ops; + + power_new.policy = nla_get_u8(tb[ETHTOOL_A_MODULE_POWER_MODE_POLICY]); + ret = ops->get_module_power_mode(dev, &power, info->extack); + if (ret < 0) + return ret; + + if (power_new.policy == power.policy) + return 0; + + ret = ops->set_module_power_mode(dev, &power_new, info->extack); + return ret < 0 ? 
ret : 1; +} + +const struct ethnl_request_ops ethnl_module_request_ops = { + .request_cmd = ETHTOOL_MSG_MODULE_GET, + .reply_cmd = ETHTOOL_MSG_MODULE_GET_REPLY, + .hdr_attr = ETHTOOL_A_MODULE_HEADER, + .req_info_size = sizeof(struct module_req_info), + .reply_data_size = sizeof(struct module_reply_data), + + .prepare_data = module_prepare_data, + .reply_size = module_reply_size, + .fill_reply = module_fill_reply, + + .set_validate = ethnl_set_module_validate, + .set = ethnl_set_module, + .set_ntf_cmd = ETHTOOL_MSG_MODULE_NTF, +}; + +/* MODULE_FW_FLASH_ACT */ + +const struct nla_policy +ethnl_module_fw_flash_act_policy[ETHTOOL_A_MODULE_FW_FLASH_PASSWORD + 1] = { + [ETHTOOL_A_MODULE_FW_FLASH_HEADER] = + NLA_POLICY_NESTED(ethnl_header_policy), + [ETHTOOL_A_MODULE_FW_FLASH_FILE_NAME] = { .type = NLA_NUL_STRING }, + [ETHTOOL_A_MODULE_FW_FLASH_PASSWORD] = { .type = NLA_U32 }, +}; + +static LIST_HEAD(module_fw_flash_work_list); +static DEFINE_SPINLOCK(module_fw_flash_work_list_lock); + +static int +module_flash_fw_work_list_add(struct ethtool_module_fw_flash *module_fw, + struct genl_info *info) +{ + struct ethtool_module_fw_flash *work; + + /* First, check if already registered. */ + spin_lock(&module_fw_flash_work_list_lock); + list_for_each_entry(work, &module_fw_flash_work_list, list) { + if (work->fw_update.ntf_params.portid == info->snd_portid && + work->fw_update.dev == module_fw->fw_update.dev) { + spin_unlock(&module_fw_flash_work_list_lock); + return -EALREADY; + } + } + + list_add_tail(&module_fw->list, &module_fw_flash_work_list); + spin_unlock(&module_fw_flash_work_list_lock); + + return 0; +} + +static void module_flash_fw_work_list_del(struct list_head *list) +{ + spin_lock(&module_fw_flash_work_list_lock); + list_del(list); + spin_unlock(&module_fw_flash_work_list_lock); +} + +static void module_flash_fw_work(struct work_struct *work) +{ + struct ethtool_module_fw_flash *module_fw; + + module_fw = container_of(work, struct ethtool_module_fw_flash, work); + + ethtool_cmis_fw_update(&module_fw->fw_update); + + module_flash_fw_work_list_del(&module_fw->list); + module_fw->fw_update.dev->ethtool->module_fw_flash_in_progress = false; + netdev_put(module_fw->fw_update.dev, &module_fw->dev_tracker); + release_firmware(module_fw->fw_update.fw); + kfree(module_fw); +} + +#define MODULE_EEPROM_PHYS_ID_PAGE 0 +#define MODULE_EEPROM_PHYS_ID_I2C_ADDR 0x50 + +static int module_flash_fw_work_init(struct ethtool_module_fw_flash *module_fw, + struct net_device *dev, + struct netlink_ext_ack *extack) +{ + const struct ethtool_ops *ops = dev->ethtool_ops; + struct ethtool_module_eeprom page_data = {}; + u8 phys_id; + int err; + + /* Fetch the SFF-8024 Identifier Value. For all supported standards, it + * is located at I2C address 0x50, byte 0. See section 4.1 in SFF-8024, + * revision 4.9. 
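+	 * The identifier read here is then matched against the CMIS-based
+	 * module types below; any other module type is rejected with
+	 * -EOPNOTSUPP.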
+ */ + page_data.page = MODULE_EEPROM_PHYS_ID_PAGE; + page_data.offset = SFP_PHYS_ID; + page_data.length = sizeof(phys_id); + page_data.i2c_address = MODULE_EEPROM_PHYS_ID_I2C_ADDR; + page_data.data = &phys_id; + + err = ops->get_module_eeprom_by_page(dev, &page_data, extack); + if (err < 0) + return err; + + switch (phys_id) { + case SFF8024_ID_QSFP_DD: + case SFF8024_ID_OSFP: + case SFF8024_ID_DSFP: + case SFF8024_ID_QSFP_PLUS_CMIS: + case SFF8024_ID_SFP_DD_CMIS: + case SFF8024_ID_SFP_PLUS_CMIS: + INIT_WORK(&module_fw->work, module_flash_fw_work); + break; + default: + NL_SET_ERR_MSG(extack, + "Module type does not support firmware flashing"); + return -EOPNOTSUPP; + } + + return 0; +} + +void ethnl_module_fw_flash_sock_destroy(struct ethnl_sock_priv *sk_priv) +{ + struct ethtool_module_fw_flash *work; + + spin_lock(&module_fw_flash_work_list_lock); + list_for_each_entry(work, &module_fw_flash_work_list, list) { + if (work->fw_update.dev == sk_priv->dev && + work->fw_update.ntf_params.portid == sk_priv->portid) { + work->fw_update.ntf_params.closed_sock = true; + break; + } + } + spin_unlock(&module_fw_flash_work_list_lock); +} + +static int +module_flash_fw_schedule(struct net_device *dev, const char *file_name, + struct ethtool_module_fw_flash_params *params, + struct sk_buff *skb, struct genl_info *info) +{ + struct ethtool_cmis_fw_update_params *fw_update; + struct ethtool_module_fw_flash *module_fw; + int err; + + module_fw = kzalloc(sizeof(*module_fw), GFP_KERNEL); + if (!module_fw) + return -ENOMEM; + + fw_update = &module_fw->fw_update; + fw_update->params = *params; + err = request_firmware_direct(&fw_update->fw, + file_name, &dev->dev); + if (err) { + NL_SET_ERR_MSG(info->extack, + "Failed to request module firmware image"); + goto err_free; + } + + err = module_flash_fw_work_init(module_fw, dev, info->extack); + if (err < 0) + goto err_release_firmware; + + dev->ethtool->module_fw_flash_in_progress = true; + netdev_hold(dev, &module_fw->dev_tracker, GFP_KERNEL); + fw_update->dev = dev; + fw_update->ntf_params.portid = info->snd_portid; + fw_update->ntf_params.seq = info->snd_seq; + fw_update->ntf_params.closed_sock = false; + + err = ethnl_sock_priv_set(skb, dev, fw_update->ntf_params.portid, + ETHTOOL_SOCK_TYPE_MODULE_FW_FLASH); + if (err < 0) + goto err_release_firmware; + + err = module_flash_fw_work_list_add(module_fw, info); + if (err < 0) + goto err_release_firmware; + + schedule_work(&module_fw->work); + + return 0; + +err_release_firmware: + release_firmware(fw_update->fw); +err_free: + kfree(module_fw); + return err; +} + +static int module_flash_fw(struct net_device *dev, struct nlattr **tb, + struct sk_buff *skb, struct genl_info *info) +{ + struct ethtool_module_fw_flash_params params = {}; + const char *file_name; + struct nlattr *attr; + + if (GENL_REQ_ATTR_CHECK(info, ETHTOOL_A_MODULE_FW_FLASH_FILE_NAME)) + return -EINVAL; + + file_name = nla_data(tb[ETHTOOL_A_MODULE_FW_FLASH_FILE_NAME]); + + attr = tb[ETHTOOL_A_MODULE_FW_FLASH_PASSWORD]; + if (attr) { + params.password = cpu_to_be32(nla_get_u32(attr)); + params.password_valid = true; + } + + return module_flash_fw_schedule(dev, file_name, ¶ms, skb, info); +} + +static int ethnl_module_fw_flash_validate(struct net_device *dev, + struct netlink_ext_ack *extack) +{ + struct devlink_port *devlink_port = dev->devlink_port; + const struct ethtool_ops *ops = dev->ethtool_ops; + + if (!ops->set_module_eeprom_by_page || + !ops->get_module_eeprom_by_page) { + NL_SET_ERR_MSG(extack, + "Flashing module firmware is not 
supported by this device"); + return -EOPNOTSUPP; + } + + if (!ops->reset) { + NL_SET_ERR_MSG(extack, + "Reset module is not supported by this device, so flashing is not permitted"); + return -EOPNOTSUPP; + } + + if (dev->ethtool->module_fw_flash_in_progress) { + NL_SET_ERR_MSG(extack, "Module firmware flashing already in progress"); + return -EBUSY; + } + + if (dev->flags & IFF_UP) { + NL_SET_ERR_MSG(extack, "Netdevice is up, so flashing is not permitted"); + return -EBUSY; + } + + if (devlink_port && devlink_port->attrs.split) { + NL_SET_ERR_MSG(extack, "Can't perform firmware flashing on a split port"); + return -EOPNOTSUPP; + } + + return 0; +} + +int ethnl_act_module_fw_flash(struct sk_buff *skb, struct genl_info *info) +{ + struct ethnl_req_info req_info = {}; + struct nlattr **tb = info->attrs; + struct net_device *dev; + int ret; + + ret = ethnl_parse_header_dev_get(&req_info, + tb[ETHTOOL_A_MODULE_FW_FLASH_HEADER], + genl_info_net(info), info->extack, + true); + if (ret < 0) + return ret; + dev = req_info.dev; + + rtnl_lock(); + netdev_lock_ops(dev); + ret = ethnl_ops_begin(dev); + if (ret < 0) + goto out_unlock; + + ret = ethnl_module_fw_flash_validate(dev, info->extack); + if (ret < 0) + goto out_unlock; + + ret = module_flash_fw(dev, tb, skb, info); + + ethnl_ops_complete(dev); + +out_unlock: + netdev_unlock_ops(dev); + rtnl_unlock(); + ethnl_parse_header_dev_put(&req_info); + return ret; +} + +/* MODULE_FW_FLASH_NTF */ + +static int +ethnl_module_fw_flash_ntf_put_err(struct sk_buff *skb, char *err_msg, + char *sub_err_msg) +{ + int err_msg_len, sub_err_msg_len, total_len; + struct nlattr *attr; + + if (!err_msg) + return 0; + + err_msg_len = strlen(err_msg); + total_len = err_msg_len + 2; /* For period and NUL. */ + + if (sub_err_msg) { + sub_err_msg_len = strlen(sub_err_msg); + total_len += sub_err_msg_len + 2; /* For ", ". 
*/ + } + + attr = nla_reserve(skb, ETHTOOL_A_MODULE_FW_FLASH_STATUS_MSG, + total_len); + if (!attr) + return -ENOMEM; + + if (sub_err_msg) + sprintf(nla_data(attr), "%s, %s.", err_msg, sub_err_msg); + else + sprintf(nla_data(attr), "%s.", err_msg); + + return 0; +} + +static void +ethnl_module_fw_flash_ntf(struct net_device *dev, + enum ethtool_module_fw_flash_status status, + struct ethnl_module_fw_flash_ntf_params *ntf_params, + char *err_msg, char *sub_err_msg, + u64 done, u64 total) +{ + struct sk_buff *skb; + void *hdr; + int ret; + + if (ntf_params->closed_sock) + return; + + skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); + if (!skb) + return; + + hdr = ethnl_unicast_put(skb, ntf_params->portid, ++ntf_params->seq, + ETHTOOL_MSG_MODULE_FW_FLASH_NTF); + if (!hdr) + goto err_skb; + + ret = ethnl_fill_reply_header(skb, dev, + ETHTOOL_A_MODULE_FW_FLASH_HEADER); + if (ret < 0) + goto err_skb; + + if (nla_put_u32(skb, ETHTOOL_A_MODULE_FW_FLASH_STATUS, status)) + goto err_skb; + + ret = ethnl_module_fw_flash_ntf_put_err(skb, err_msg, sub_err_msg); + if (ret < 0) + goto err_skb; + + if (nla_put_uint(skb, ETHTOOL_A_MODULE_FW_FLASH_DONE, done)) + goto err_skb; + + if (nla_put_uint(skb, ETHTOOL_A_MODULE_FW_FLASH_TOTAL, total)) + goto err_skb; + + genlmsg_end(skb, hdr); + genlmsg_unicast(dev_net(dev), skb, ntf_params->portid); + return; + +err_skb: + nlmsg_free(skb); +} + +void ethnl_module_fw_flash_ntf_err(struct net_device *dev, + struct ethnl_module_fw_flash_ntf_params *params, + char *err_msg, char *sub_err_msg) +{ + ethnl_module_fw_flash_ntf(dev, ETHTOOL_MODULE_FW_FLASH_STATUS_ERROR, + params, err_msg, sub_err_msg, 0, 0); +} + +void +ethnl_module_fw_flash_ntf_start(struct net_device *dev, + struct ethnl_module_fw_flash_ntf_params *params) +{ + ethnl_module_fw_flash_ntf(dev, ETHTOOL_MODULE_FW_FLASH_STATUS_STARTED, + params, NULL, NULL, 0, 0); +} + +void +ethnl_module_fw_flash_ntf_complete(struct net_device *dev, + struct ethnl_module_fw_flash_ntf_params *params) +{ + ethnl_module_fw_flash_ntf(dev, ETHTOOL_MODULE_FW_FLASH_STATUS_COMPLETED, + params, NULL, NULL, 0, 0); +} + +void +ethnl_module_fw_flash_ntf_in_progress(struct net_device *dev, + struct ethnl_module_fw_flash_ntf_params *params, + u64 done, u64 total) +{ + ethnl_module_fw_flash_ntf(dev, + ETHTOOL_MODULE_FW_FLASH_STATUS_IN_PROGRESS, + params, NULL, NULL, done, total); +} diff --git a/net/ethtool/module_fw.h b/net/ethtool/module_fw.h new file mode 100644 index 000000000000..634543a12d0c --- /dev/null +++ b/net/ethtool/module_fw.h @@ -0,0 +1,75 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#include <uapi/linux/ethtool.h> +#include "netlink.h" + +/** + * struct ethnl_module_fw_flash_ntf_params - module firmware flashing + * notifications parameters + * @portid: Netlink portid of sender. + * @seq: Sequence number of sender. + * @closed_sock: Indicates whether the socket was closed from user space. + */ +struct ethnl_module_fw_flash_ntf_params { + u32 portid; + u32 seq; + bool closed_sock; +}; + +/** + * struct ethtool_module_fw_flash_params - module firmware flashing parameters + * @password: Module password. Only valid when @pass_valid is set. + * @password_valid: Whether the module password is valid or not. + */ +struct ethtool_module_fw_flash_params { + __be32 password; + u8 password_valid:1; +}; + +/** + * struct ethtool_cmis_fw_update_params - CMIS firmware update specific + * parameters + * @dev: Pointer to the net_device to be flashed. + * @params: Module firmware flashing parameters. 
+ * @ntf_params: Module firmware flashing notification parameters. + * @fw: Firmware to flash. + */ +struct ethtool_cmis_fw_update_params { + struct net_device *dev; + struct ethtool_module_fw_flash_params params; + struct ethnl_module_fw_flash_ntf_params ntf_params; + const struct firmware *fw; +}; + +/** + * struct ethtool_module_fw_flash - module firmware flashing + * @list: List node for &module_fw_flash_work_list. + * @dev_tracker: Refcount tracker for @dev. + * @work: The flashing firmware work. + * @fw_update: CMIS firmware update specific parameters. + */ +struct ethtool_module_fw_flash { + struct list_head list; + netdevice_tracker dev_tracker; + struct work_struct work; + struct ethtool_cmis_fw_update_params fw_update; +}; + +void ethnl_module_fw_flash_sock_destroy(struct ethnl_sock_priv *sk_priv); + +void +ethnl_module_fw_flash_ntf_err(struct net_device *dev, + struct ethnl_module_fw_flash_ntf_params *params, + char *err_msg, char *sub_err_msg); +void +ethnl_module_fw_flash_ntf_start(struct net_device *dev, + struct ethnl_module_fw_flash_ntf_params *params); +void +ethnl_module_fw_flash_ntf_complete(struct net_device *dev, + struct ethnl_module_fw_flash_ntf_params *params); +void +ethnl_module_fw_flash_ntf_in_progress(struct net_device *dev, + struct ethnl_module_fw_flash_ntf_params *params, + u64 done, u64 total); + +void ethtool_cmis_fw_update(struct ethtool_cmis_fw_update_params *params); diff --git a/net/ethtool/mse.c b/net/ethtool/mse.c new file mode 100644 index 000000000000..6aac004c3ffc --- /dev/null +++ b/net/ethtool/mse.c @@ -0,0 +1,329 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include <linux/ethtool.h> +#include <linux/phy.h> +#include <linux/slab.h> + +#include "netlink.h" +#include "common.h" + +/* Channels A-D only; WORST and LINK are exclusive alternatives */ +#define PHY_MSE_CHANNEL_COUNT 4 + +struct mse_req_info { + struct ethnl_req_info base; +}; + +struct mse_snapshot_entry { + struct phy_mse_snapshot snapshot; + int channel; +}; + +struct mse_reply_data { + struct ethnl_reply_data base; + struct phy_mse_capability capability; + struct mse_snapshot_entry *snapshots; + unsigned int num_snapshots; +}; + +static struct mse_reply_data * +mse_repdata(const struct ethnl_reply_data *reply_base) +{ + return container_of(reply_base, struct mse_reply_data, base); +} + +const struct nla_policy ethnl_mse_get_policy[] = { + [ETHTOOL_A_MSE_HEADER] = NLA_POLICY_NESTED(ethnl_header_policy_phy), +}; + +static int get_snapshot_if_supported(struct phy_device *phydev, + struct mse_reply_data *data, + unsigned int *idx, u32 cap_bit, + enum phy_mse_channel channel) +{ + int ret; + + if (data->capability.supported_caps & cap_bit) { + ret = phydev->drv->get_mse_snapshot(phydev, channel, + &data->snapshots[*idx].snapshot); + if (ret) + return ret; + data->snapshots[*idx].channel = channel; + (*idx)++; + } + + return 0; +} + +static int mse_get_channels(struct phy_device *phydev, + struct mse_reply_data *data) +{ + unsigned int i = 0; + int ret; + + if (!data->capability.supported_caps) + return 0; + + data->snapshots = kcalloc(PHY_MSE_CHANNEL_COUNT, + sizeof(*data->snapshots), GFP_KERNEL); + if (!data->snapshots) + return -ENOMEM; + + /* Priority 1: Individual channels */ + ret = get_snapshot_if_supported(phydev, data, &i, PHY_MSE_CAP_CHANNEL_A, + PHY_MSE_CHANNEL_A); + if (ret) + return ret; + ret = get_snapshot_if_supported(phydev, data, &i, PHY_MSE_CAP_CHANNEL_B, + PHY_MSE_CHANNEL_B); + if (ret) + return ret; + ret = get_snapshot_if_supported(phydev, data, &i, 
PHY_MSE_CAP_CHANNEL_C, + PHY_MSE_CHANNEL_C); + if (ret) + return ret; + ret = get_snapshot_if_supported(phydev, data, &i, PHY_MSE_CAP_CHANNEL_D, + PHY_MSE_CHANNEL_D); + if (ret) + return ret; + + /* If any individual channels were found, we are done. */ + if (i > 0) { + data->num_snapshots = i; + return 0; + } + + /* Priority 2: Worst channel, if no individual channels supported. */ + ret = get_snapshot_if_supported(phydev, data, &i, + PHY_MSE_CAP_WORST_CHANNEL, + PHY_MSE_CHANNEL_WORST); + if (ret) + return ret; + + /* If worst channel was found, we are done. */ + if (i > 0) { + data->num_snapshots = i; + return 0; + } + + /* Priority 3: Link-wide, if nothing else is supported. */ + ret = get_snapshot_if_supported(phydev, data, &i, PHY_MSE_CAP_LINK, + PHY_MSE_CHANNEL_LINK); + if (ret) + return ret; + + data->num_snapshots = i; + return 0; +} + +static int mse_prepare_data(const struct ethnl_req_info *req_base, + struct ethnl_reply_data *reply_base, + const struct genl_info *info) +{ + struct mse_reply_data *data = mse_repdata(reply_base); + struct net_device *dev = reply_base->dev; + struct phy_device *phydev; + int ret; + + phydev = ethnl_req_get_phydev(req_base, info->attrs, + ETHTOOL_A_MSE_HEADER, info->extack); + if (IS_ERR(phydev)) + return PTR_ERR(phydev); + if (!phydev) + return -EOPNOTSUPP; + + ret = ethnl_ops_begin(dev); + if (ret) + return ret; + + mutex_lock(&phydev->lock); + + if (!phydev->drv || !phydev->drv->get_mse_capability || + !phydev->drv->get_mse_snapshot) { + ret = -EOPNOTSUPP; + goto out_unlock; + } + if (!phydev->link) { + ret = -ENETDOWN; + goto out_unlock; + } + + ret = phydev->drv->get_mse_capability(phydev, &data->capability); + if (ret) + goto out_unlock; + + ret = mse_get_channels(phydev, data); + +out_unlock: + mutex_unlock(&phydev->lock); + ethnl_ops_complete(dev); + if (ret) + kfree(data->snapshots); + return ret; +} + +static void mse_cleanup_data(struct ethnl_reply_data *reply_base) +{ + struct mse_reply_data *data = mse_repdata(reply_base); + + kfree(data->snapshots); +} + +static int mse_reply_size(const struct ethnl_req_info *req_base, + const struct ethnl_reply_data *reply_base) +{ + const struct mse_reply_data *data = mse_repdata(reply_base); + size_t len = 0; + unsigned int i; + + /* ETHTOOL_A_MSE_CAPABILITIES */ + len += nla_total_size(0); + if (data->capability.supported_caps & PHY_MSE_CAP_AVG) + /* ETHTOOL_A_MSE_CAPABILITIES_MAX_AVERAGE_MSE */ + len += nla_total_size(sizeof(u64)); + if (data->capability.supported_caps & (PHY_MSE_CAP_PEAK | + PHY_MSE_CAP_WORST_PEAK)) + /* ETHTOOL_A_MSE_CAPABILITIES_MAX_PEAK_MSE */ + len += nla_total_size(sizeof(u64)); + /* ETHTOOL_A_MSE_CAPABILITIES_REFRESH_RATE_PS */ + len += nla_total_size(sizeof(u64)); + /* ETHTOOL_A_MSE_CAPABILITIES_NUM_SYMBOLS */ + len += nla_total_size(sizeof(u64)); + + for (i = 0; i < data->num_snapshots; i++) { + size_t snapshot_len = 0; + + /* Per-channel nest (e.g., ETHTOOL_A_MSE_CHANNEL_A / _B / _C / + * _D / _WORST_CHANNEL / _LINK) + */ + snapshot_len += nla_total_size(0); + + if (data->capability.supported_caps & PHY_MSE_CAP_AVG) + snapshot_len += nla_total_size(sizeof(u64)); + if (data->capability.supported_caps & PHY_MSE_CAP_PEAK) + snapshot_len += nla_total_size(sizeof(u64)); + if (data->capability.supported_caps & PHY_MSE_CAP_WORST_PEAK) + snapshot_len += nla_total_size(sizeof(u64)); + + len += snapshot_len; + } + + return len; +} + +static int mse_channel_to_attr(int ch) +{ + switch (ch) { + case PHY_MSE_CHANNEL_A: + return ETHTOOL_A_MSE_CHANNEL_A; + case PHY_MSE_CHANNEL_B: + 
return ETHTOOL_A_MSE_CHANNEL_B; + case PHY_MSE_CHANNEL_C: + return ETHTOOL_A_MSE_CHANNEL_C; + case PHY_MSE_CHANNEL_D: + return ETHTOOL_A_MSE_CHANNEL_D; + case PHY_MSE_CHANNEL_WORST: + return ETHTOOL_A_MSE_WORST_CHANNEL; + case PHY_MSE_CHANNEL_LINK: + return ETHTOOL_A_MSE_LINK; + default: + return -EINVAL; + } +} + +static int mse_fill_reply(struct sk_buff *skb, + const struct ethnl_req_info *req_base, + const struct ethnl_reply_data *reply_base) +{ + const struct mse_reply_data *data = mse_repdata(reply_base); + struct nlattr *nest; + unsigned int i; + int ret; + + nest = nla_nest_start(skb, ETHTOOL_A_MSE_CAPABILITIES); + if (!nest) + return -EMSGSIZE; + + if (data->capability.supported_caps & PHY_MSE_CAP_AVG) { + ret = nla_put_uint(skb, + ETHTOOL_A_MSE_CAPABILITIES_MAX_AVERAGE_MSE, + data->capability.max_average_mse); + if (ret < 0) + goto nla_put_nest_failure; + } + + if (data->capability.supported_caps & (PHY_MSE_CAP_PEAK | + PHY_MSE_CAP_WORST_PEAK)) { + ret = nla_put_uint(skb, ETHTOOL_A_MSE_CAPABILITIES_MAX_PEAK_MSE, + data->capability.max_peak_mse); + if (ret < 0) + goto nla_put_nest_failure; + } + + ret = nla_put_uint(skb, ETHTOOL_A_MSE_CAPABILITIES_REFRESH_RATE_PS, + data->capability.refresh_rate_ps); + if (ret < 0) + goto nla_put_nest_failure; + + ret = nla_put_uint(skb, ETHTOOL_A_MSE_CAPABILITIES_NUM_SYMBOLS, + data->capability.num_symbols); + if (ret < 0) + goto nla_put_nest_failure; + + nla_nest_end(skb, nest); + + for (i = 0; i < data->num_snapshots; i++) { + const struct mse_snapshot_entry *s = &data->snapshots[i]; + int chan_attr; + + chan_attr = mse_channel_to_attr(s->channel); + if (chan_attr < 0) + return chan_attr; + + nest = nla_nest_start(skb, chan_attr); + if (!nest) + return -EMSGSIZE; + + if (data->capability.supported_caps & PHY_MSE_CAP_AVG) { + ret = nla_put_uint(skb, + ETHTOOL_A_MSE_SNAPSHOT_AVERAGE_MSE, + s->snapshot.average_mse); + if (ret) + goto nla_put_nest_failure; + } + if (data->capability.supported_caps & PHY_MSE_CAP_PEAK) { + ret = nla_put_uint(skb, ETHTOOL_A_MSE_SNAPSHOT_PEAK_MSE, + s->snapshot.peak_mse); + if (ret) + goto nla_put_nest_failure; + } + if (data->capability.supported_caps & PHY_MSE_CAP_WORST_PEAK) { + ret = nla_put_uint(skb, + ETHTOOL_A_MSE_SNAPSHOT_WORST_PEAK_MSE, + s->snapshot.worst_peak_mse); + if (ret) + goto nla_put_nest_failure; + } + + nla_nest_end(skb, nest); + } + + return 0; + +nla_put_nest_failure: + nla_nest_cancel(skb, nest); + return ret; +} + +const struct ethnl_request_ops ethnl_mse_request_ops = { + .request_cmd = ETHTOOL_MSG_MSE_GET, + .reply_cmd = ETHTOOL_MSG_MSE_GET_REPLY, + .hdr_attr = ETHTOOL_A_MSE_HEADER, + .req_info_size = sizeof(struct mse_req_info), + .reply_data_size = sizeof(struct mse_reply_data), + + .prepare_data = mse_prepare_data, + .cleanup_data = mse_cleanup_data, + .reply_size = mse_reply_size, + .fill_reply = mse_fill_reply, +}; diff --git a/net/ethtool/netlink.c b/net/ethtool/netlink.c new file mode 100644 index 000000000000..6e5f0f4f815a --- /dev/null +++ b/net/ethtool/netlink.c @@ -0,0 +1,1583 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include <net/netdev_lock.h> +#include <net/netdev_queues.h> +#include <net/sock.h> +#include <linux/ethtool_netlink.h> +#include <linux/phy_link_topology.h> +#include <linux/pm_runtime.h> +#include "netlink.h" +#include "module_fw.h" + +static struct genl_family ethtool_genl_family; + +static bool ethnl_ok __read_mostly; +static u32 ethnl_bcast_seq; + +#define ETHTOOL_FLAGS_BASIC (ETHTOOL_FLAG_COMPACT_BITSETS | \ + ETHTOOL_FLAG_OMIT_REPLY) +#define 
ETHTOOL_FLAGS_STATS (ETHTOOL_FLAGS_BASIC | ETHTOOL_FLAG_STATS) + +const struct nla_policy ethnl_header_policy[] = { + [ETHTOOL_A_HEADER_DEV_INDEX] = { .type = NLA_U32 }, + [ETHTOOL_A_HEADER_DEV_NAME] = { .type = NLA_NUL_STRING, + .len = ALTIFNAMSIZ - 1 }, + [ETHTOOL_A_HEADER_FLAGS] = NLA_POLICY_MASK(NLA_U32, + ETHTOOL_FLAGS_BASIC), +}; + +const struct nla_policy ethnl_header_policy_stats[] = { + [ETHTOOL_A_HEADER_DEV_INDEX] = { .type = NLA_U32 }, + [ETHTOOL_A_HEADER_DEV_NAME] = { .type = NLA_NUL_STRING, + .len = ALTIFNAMSIZ - 1 }, + [ETHTOOL_A_HEADER_FLAGS] = NLA_POLICY_MASK(NLA_U32, + ETHTOOL_FLAGS_STATS), +}; + +const struct nla_policy ethnl_header_policy_phy[] = { + [ETHTOOL_A_HEADER_DEV_INDEX] = { .type = NLA_U32 }, + [ETHTOOL_A_HEADER_DEV_NAME] = { .type = NLA_NUL_STRING, + .len = ALTIFNAMSIZ - 1 }, + [ETHTOOL_A_HEADER_FLAGS] = NLA_POLICY_MASK(NLA_U32, + ETHTOOL_FLAGS_BASIC), + [ETHTOOL_A_HEADER_PHY_INDEX] = NLA_POLICY_MIN(NLA_U32, 1), +}; + +const struct nla_policy ethnl_header_policy_phy_stats[] = { + [ETHTOOL_A_HEADER_DEV_INDEX] = { .type = NLA_U32 }, + [ETHTOOL_A_HEADER_DEV_NAME] = { .type = NLA_NUL_STRING, + .len = ALTIFNAMSIZ - 1 }, + [ETHTOOL_A_HEADER_FLAGS] = NLA_POLICY_MASK(NLA_U32, + ETHTOOL_FLAGS_STATS), + [ETHTOOL_A_HEADER_PHY_INDEX] = NLA_POLICY_MIN(NLA_U32, 1), +}; + +int ethnl_sock_priv_set(struct sk_buff *skb, struct net_device *dev, u32 portid, + enum ethnl_sock_type type) +{ + struct ethnl_sock_priv *sk_priv; + + sk_priv = genl_sk_priv_get(ðtool_genl_family, NETLINK_CB(skb).sk); + if (IS_ERR(sk_priv)) + return PTR_ERR(sk_priv); + + sk_priv->dev = dev; + sk_priv->portid = portid; + sk_priv->type = type; + + return 0; +} + +static void ethnl_sock_priv_destroy(void *priv) +{ + struct ethnl_sock_priv *sk_priv = priv; + + switch (sk_priv->type) { + case ETHTOOL_SOCK_TYPE_MODULE_FW_FLASH: + ethnl_module_fw_flash_sock_destroy(sk_priv); + break; + default: + break; + } +} + +u32 ethnl_bcast_seq_next(void) +{ + ASSERT_RTNL(); + return ++ethnl_bcast_seq; +} + +int ethnl_ops_begin(struct net_device *dev) +{ + int ret; + + if (!dev) + return -ENODEV; + + if (dev->dev.parent) + pm_runtime_get_sync(dev->dev.parent); + + netdev_ops_assert_locked(dev); + + if (!netif_device_present(dev) || + dev->reg_state >= NETREG_UNREGISTERING) { + ret = -ENODEV; + goto err; + } + + if (dev->ethtool_ops->begin) { + ret = dev->ethtool_ops->begin(dev); + if (ret) + goto err; + } + + return 0; +err: + if (dev->dev.parent) + pm_runtime_put(dev->dev.parent); + + return ret; +} + +void ethnl_ops_complete(struct net_device *dev) +{ + if (dev->ethtool_ops->complete) + dev->ethtool_ops->complete(dev); + + if (dev->dev.parent) + pm_runtime_put(dev->dev.parent); +} + +/** + * ethnl_parse_header_dev_get() - parse request header + * @req_info: structure to put results into + * @header: nest attribute with request header + * @net: request netns + * @extack: netlink extack for error reporting + * @require_dev: fail if no device identified in header + * + * Parse request header in nested attribute @nest and puts results into + * the structure pointed to by @req_info. Extack from @info is used for error + * reporting. If req_info->dev is not null on return, reference to it has + * been taken. If error is returned, *req_info is null initialized and no + * reference is held. 
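+ *
+ * A typical caller (see ethnl_act_module_fw_flash() in this patch) parses
+ * the header, wraps the actual work in ethnl_ops_begin()/ethnl_ops_complete()
+ * on req_info->dev, and finally drops the device reference with
+ * ethnl_parse_header_dev_put().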
+ * + * Return: 0 on success or negative error code + */ +int ethnl_parse_header_dev_get(struct ethnl_req_info *req_info, + const struct nlattr *header, struct net *net, + struct netlink_ext_ack *extack, bool require_dev) +{ + struct nlattr *tb[ARRAY_SIZE(ethnl_header_policy_phy)]; + const struct nlattr *devname_attr; + struct net_device *dev = NULL; + u32 flags = 0; + int ret; + + if (!header) { + if (!require_dev) + return 0; + NL_SET_ERR_MSG(extack, "request header missing"); + return -EINVAL; + } + /* No validation here, command policy should have a nested policy set + * for the header, therefore validation should have already been done. + */ + ret = nla_parse_nested(tb, ARRAY_SIZE(ethnl_header_policy_phy) - 1, header, + NULL, extack); + if (ret < 0) + return ret; + if (tb[ETHTOOL_A_HEADER_FLAGS]) + flags = nla_get_u32(tb[ETHTOOL_A_HEADER_FLAGS]); + + devname_attr = tb[ETHTOOL_A_HEADER_DEV_NAME]; + if (tb[ETHTOOL_A_HEADER_DEV_INDEX]) { + u32 ifindex = nla_get_u32(tb[ETHTOOL_A_HEADER_DEV_INDEX]); + + dev = netdev_get_by_index(net, ifindex, &req_info->dev_tracker, + GFP_KERNEL); + if (!dev) { + NL_SET_ERR_MSG_ATTR(extack, + tb[ETHTOOL_A_HEADER_DEV_INDEX], + "no device matches ifindex"); + return -ENODEV; + } + /* if both ifindex and ifname are passed, they must match */ + if (devname_attr && + strncmp(dev->name, nla_data(devname_attr), IFNAMSIZ)) { + netdev_put(dev, &req_info->dev_tracker); + NL_SET_ERR_MSG_ATTR(extack, header, + "ifindex and name do not match"); + return -ENODEV; + } + } else if (devname_attr) { + dev = netdev_get_by_name(net, nla_data(devname_attr), + &req_info->dev_tracker, GFP_KERNEL); + if (!dev) { + NL_SET_ERR_MSG_ATTR(extack, devname_attr, + "no device matches name"); + return -ENODEV; + } + } else if (require_dev) { + NL_SET_ERR_MSG_ATTR(extack, header, + "neither ifindex nor name specified"); + return -EINVAL; + } + + if (tb[ETHTOOL_A_HEADER_PHY_INDEX]) { + if (dev) { + req_info->phy_index = nla_get_u32(tb[ETHTOOL_A_HEADER_PHY_INDEX]); + } else { + NL_SET_ERR_MSG_ATTR(extack, header, + "phy_index set without a netdev"); + return -EINVAL; + } + } + + req_info->dev = dev; + req_info->flags = flags; + return 0; +} + +struct phy_device *ethnl_req_get_phydev(const struct ethnl_req_info *req_info, + struct nlattr **tb, unsigned int header, + struct netlink_ext_ack *extack) +{ + struct phy_device *phydev; + + ASSERT_RTNL(); + + if (!req_info->dev) + return NULL; + + if (!req_info->phy_index) + return req_info->dev->phydev; + + phydev = phy_link_topo_get_phy(req_info->dev, req_info->phy_index); + if (!phydev && tb) { + NL_SET_ERR_MSG_ATTR(extack, tb[header], + "no phy matching phyindex"); + return ERR_PTR(-ENODEV); + } + + return phydev; +} + +/** + * ethnl_fill_reply_header() - Put common header into a reply message + * @skb: skb with the message + * @dev: network device to describe in header + * @attrtype: attribute type to use for the nest + * + * Create a nested attribute with attributes describing given network device. 
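+ * The nest carries ETHTOOL_A_HEADER_DEV_INDEX and ETHTOOL_A_HEADER_DEV_NAME so
+ * that receivers can match the message to a device.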
+ * + * Return: 0 on success, error value (-EMSGSIZE only) on error + */ +int ethnl_fill_reply_header(struct sk_buff *skb, struct net_device *dev, + u16 attrtype) +{ + struct nlattr *nest; + + if (!dev) + return 0; + nest = nla_nest_start(skb, attrtype); + if (!nest) + return -EMSGSIZE; + + if (nla_put_u32(skb, ETHTOOL_A_HEADER_DEV_INDEX, (u32)dev->ifindex) || + nla_put_string(skb, ETHTOOL_A_HEADER_DEV_NAME, dev->name)) + goto nla_put_failure; + /* If more attributes are put into reply header, ethnl_header_size() + * must be updated to account for them. + */ + + nla_nest_end(skb, nest); + return 0; + +nla_put_failure: + nla_nest_cancel(skb, nest); + return -EMSGSIZE; +} + +/** + * ethnl_reply_init() - Create skb for a reply and fill device identification + * @payload: payload length (without netlink and genetlink header) + * @dev: device the reply is about (may be null) + * @cmd: ETHTOOL_MSG_* message type for reply + * @hdr_attrtype: attribute type for common header + * @info: genetlink info of the received packet we respond to + * @ehdrp: place to store payload pointer returned by genlmsg_new() + * + * Return: pointer to allocated skb on success, NULL on error + */ +struct sk_buff *ethnl_reply_init(size_t payload, struct net_device *dev, u8 cmd, + u16 hdr_attrtype, struct genl_info *info, + void **ehdrp) +{ + struct sk_buff *skb; + + skb = genlmsg_new(payload, GFP_KERNEL); + if (!skb) + goto err; + *ehdrp = genlmsg_put_reply(skb, info, ðtool_genl_family, 0, cmd); + if (!*ehdrp) + goto err_free; + + if (dev) { + int ret; + + ret = ethnl_fill_reply_header(skb, dev, hdr_attrtype); + if (ret < 0) + goto err_free; + } + return skb; + +err_free: + nlmsg_free(skb); +err: + if (info) + GENL_SET_ERR_MSG(info, "failed to setup reply message"); + return NULL; +} + +void *ethnl_dump_put(struct sk_buff *skb, struct netlink_callback *cb, u8 cmd) +{ + return genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, + ðtool_genl_family, 0, cmd); +} + +void *ethnl_bcastmsg_put(struct sk_buff *skb, u8 cmd) +{ + return genlmsg_put(skb, 0, ++ethnl_bcast_seq, ðtool_genl_family, 0, + cmd); +} + +void *ethnl_unicast_put(struct sk_buff *skb, u32 portid, u32 seq, u8 cmd) +{ + return genlmsg_put(skb, portid, seq, ðtool_genl_family, 0, cmd); +} + +int ethnl_multicast(struct sk_buff *skb, struct net_device *dev) +{ + return genlmsg_multicast_netns(ðtool_genl_family, dev_net(dev), skb, + 0, ETHNL_MCGRP_MONITOR, GFP_KERNEL); +} + +/* GET request helpers */ + +/** + * struct ethnl_dump_ctx - context structure for generic dumpit() callback + * @ops: request ops of currently processed message type + * @req_info: parsed request header of processed request + * @reply_data: data needed to compose the reply + * @pos_ifindex: saved iteration position - ifindex + * + * These parameters are kept in struct netlink_callback as context preserved + * between iterations. They are initialized by ethnl_default_start() and used + * in ethnl_default_dumpit() and ethnl_default_done(). 
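+ * The whole context must fit into the ctx buffer of struct netlink_callback;
+ * ethnl_default_start() checks this with a BUILD_BUG_ON().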
+ */
+struct ethnl_dump_ctx {
+ const struct ethnl_request_ops *ops;
+ struct ethnl_req_info *req_info;
+ struct ethnl_reply_data *reply_data;
+ unsigned long pos_ifindex;
+};
+
+/**
+ * struct ethnl_perphy_dump_ctx - context for dumpit() PHY-aware callbacks
+ * @ethnl_ctx: generic ethnl context
+ * @ifindex: For Filtered DUMP requests, the ifindex of the targeted netdev
+ * @pos_phyindex: iterator position for multi-msg DUMP
+ */
+struct ethnl_perphy_dump_ctx {
+ struct ethnl_dump_ctx ethnl_ctx;
+ unsigned int ifindex;
+ unsigned long pos_phyindex;
+};
+
+static const struct ethnl_request_ops *
+ethnl_default_requests[__ETHTOOL_MSG_USER_CNT] = {
+ [ETHTOOL_MSG_STRSET_GET] = &ethnl_strset_request_ops,
+ [ETHTOOL_MSG_LINKINFO_GET] = &ethnl_linkinfo_request_ops,
+ [ETHTOOL_MSG_LINKINFO_SET] = &ethnl_linkinfo_request_ops,
+ [ETHTOOL_MSG_LINKMODES_GET] = &ethnl_linkmodes_request_ops,
+ [ETHTOOL_MSG_LINKMODES_SET] = &ethnl_linkmodes_request_ops,
+ [ETHTOOL_MSG_LINKSTATE_GET] = &ethnl_linkstate_request_ops,
+ [ETHTOOL_MSG_DEBUG_GET] = &ethnl_debug_request_ops,
+ [ETHTOOL_MSG_DEBUG_SET] = &ethnl_debug_request_ops,
+ [ETHTOOL_MSG_WOL_GET] = &ethnl_wol_request_ops,
+ [ETHTOOL_MSG_WOL_SET] = &ethnl_wol_request_ops,
+ [ETHTOOL_MSG_FEATURES_GET] = &ethnl_features_request_ops,
+ [ETHTOOL_MSG_PRIVFLAGS_GET] = &ethnl_privflags_request_ops,
+ [ETHTOOL_MSG_PRIVFLAGS_SET] = &ethnl_privflags_request_ops,
+ [ETHTOOL_MSG_RINGS_GET] = &ethnl_rings_request_ops,
+ [ETHTOOL_MSG_RINGS_SET] = &ethnl_rings_request_ops,
+ [ETHTOOL_MSG_CHANNELS_GET] = &ethnl_channels_request_ops,
+ [ETHTOOL_MSG_CHANNELS_SET] = &ethnl_channels_request_ops,
+ [ETHTOOL_MSG_COALESCE_GET] = &ethnl_coalesce_request_ops,
+ [ETHTOOL_MSG_COALESCE_SET] = &ethnl_coalesce_request_ops,
+ [ETHTOOL_MSG_PAUSE_GET] = &ethnl_pause_request_ops,
+ [ETHTOOL_MSG_PAUSE_SET] = &ethnl_pause_request_ops,
+ [ETHTOOL_MSG_EEE_GET] = &ethnl_eee_request_ops,
+ [ETHTOOL_MSG_EEE_SET] = &ethnl_eee_request_ops,
+ [ETHTOOL_MSG_FEC_GET] = &ethnl_fec_request_ops,
+ [ETHTOOL_MSG_FEC_SET] = &ethnl_fec_request_ops,
+ [ETHTOOL_MSG_TSINFO_GET] = &ethnl_tsinfo_request_ops,
+ [ETHTOOL_MSG_MODULE_EEPROM_GET] = &ethnl_module_eeprom_request_ops,
+ [ETHTOOL_MSG_STATS_GET] = &ethnl_stats_request_ops,
+ [ETHTOOL_MSG_PHC_VCLOCKS_GET] = &ethnl_phc_vclocks_request_ops,
+ [ETHTOOL_MSG_MODULE_GET] = &ethnl_module_request_ops,
+ [ETHTOOL_MSG_MODULE_SET] = &ethnl_module_request_ops,
+ [ETHTOOL_MSG_PSE_GET] = &ethnl_pse_request_ops,
+ [ETHTOOL_MSG_PSE_SET] = &ethnl_pse_request_ops,
+ [ETHTOOL_MSG_RSS_GET] = &ethnl_rss_request_ops,
+ [ETHTOOL_MSG_RSS_SET] = &ethnl_rss_request_ops,
+ [ETHTOOL_MSG_PLCA_GET_CFG] = &ethnl_plca_cfg_request_ops,
+ [ETHTOOL_MSG_PLCA_SET_CFG] = &ethnl_plca_cfg_request_ops,
+ [ETHTOOL_MSG_PLCA_GET_STATUS] = &ethnl_plca_status_request_ops,
+ [ETHTOOL_MSG_MM_GET] = &ethnl_mm_request_ops,
+ [ETHTOOL_MSG_MM_SET] = &ethnl_mm_request_ops,
+ [ETHTOOL_MSG_TSCONFIG_GET] = &ethnl_tsconfig_request_ops,
+ [ETHTOOL_MSG_TSCONFIG_SET] = &ethnl_tsconfig_request_ops,
+ [ETHTOOL_MSG_PHY_GET] = &ethnl_phy_request_ops,
+ [ETHTOOL_MSG_MSE_GET] = &ethnl_mse_request_ops,
+};
+
+static struct ethnl_dump_ctx *ethnl_dump_context(struct netlink_callback *cb)
+{
+ return (struct ethnl_dump_ctx *)cb->ctx;
+}
+
+static struct ethnl_perphy_dump_ctx *
+ethnl_perphy_dump_context(struct netlink_callback *cb)
+{
+ return (struct ethnl_perphy_dump_ctx *)cb->ctx;
+}
+
+/**
+ * ethnl_default_parse() - Parse request message
+ * @req_info: pointer to structure to put data into
+ * @info: genl_info from the request
+ * @request_ops: struct request_ops for request type
+ * @require_dev: fail if no device identified in header
+ *
+ * Parse
universal request header and call request specific ->parse_request() + * callback (if defined) to parse the rest of the message. + * + * Return: 0 on success or negative error code + */ +static int ethnl_default_parse(struct ethnl_req_info *req_info, + const struct genl_info *info, + const struct ethnl_request_ops *request_ops, + bool require_dev) +{ + struct nlattr **tb = info->attrs; + int ret; + + ret = ethnl_parse_header_dev_get(req_info, tb[request_ops->hdr_attr], + genl_info_net(info), info->extack, + require_dev); + if (ret < 0) + return ret; + + if (request_ops->parse_request) { + ret = request_ops->parse_request(req_info, tb, info->extack); + if (ret < 0) + goto err_dev; + } + + return 0; + +err_dev: + netdev_put(req_info->dev, &req_info->dev_tracker); + req_info->dev = NULL; + return ret; +} + +/** + * ethnl_init_reply_data() - Initialize reply data for GET request + * @reply_data: pointer to embedded struct ethnl_reply_data + * @ops: instance of struct ethnl_request_ops describing the layout + * @dev: network device to initialize the reply for + * + * Fills the reply data part with zeros and sets the dev member. Must be called + * before calling the ->fill_reply() callback (for each iteration when handling + * dump requests). + */ +static void ethnl_init_reply_data(struct ethnl_reply_data *reply_data, + const struct ethnl_request_ops *ops, + struct net_device *dev) +{ + memset(reply_data, 0, ops->reply_data_size); + reply_data->dev = dev; +} + +/* default ->doit() handler for GET type requests */ +static int ethnl_default_doit(struct sk_buff *skb, struct genl_info *info) +{ + struct ethnl_reply_data *reply_data = NULL; + struct ethnl_req_info *req_info = NULL; + const u8 cmd = info->genlhdr->cmd; + const struct ethnl_request_ops *ops; + int hdr_len, reply_len; + struct sk_buff *rskb; + void *reply_payload; + int ret; + + ops = ethnl_default_requests[cmd]; + if (WARN_ONCE(!ops, "cmd %u has no ethnl_request_ops\n", cmd)) + return -EOPNOTSUPP; + if (GENL_REQ_ATTR_CHECK(info, ops->hdr_attr)) + return -EINVAL; + + req_info = kzalloc(ops->req_info_size, GFP_KERNEL); + if (!req_info) + return -ENOMEM; + reply_data = kmalloc(ops->reply_data_size, GFP_KERNEL); + if (!reply_data) { + kfree(req_info); + return -ENOMEM; + } + + ret = ethnl_default_parse(req_info, info, ops, !ops->allow_nodev_do); + if (ret < 0) + goto err_free; + ethnl_init_reply_data(reply_data, ops, req_info->dev); + + rtnl_lock(); + if (req_info->dev) + netdev_lock_ops(req_info->dev); + ret = ops->prepare_data(req_info, reply_data, info); + if (req_info->dev) + netdev_unlock_ops(req_info->dev); + rtnl_unlock(); + if (ret < 0) + goto err_dev; + ret = ops->reply_size(req_info, reply_data); + if (ret < 0) + goto err_cleanup; + reply_len = ret; + ret = -ENOMEM; + rskb = ethnl_reply_init(reply_len + ethnl_reply_header_size(), + req_info->dev, ops->reply_cmd, + ops->hdr_attr, info, &reply_payload); + if (!rskb) + goto err_cleanup; + hdr_len = rskb->len; + ret = ops->fill_reply(rskb, req_info, reply_data); + if (ret < 0) + goto err_msg; + WARN_ONCE(rskb->len - hdr_len > reply_len, + "ethnl cmd %d: calculated reply length %d, but consumed %d\n", + cmd, reply_len, rskb->len - hdr_len); + if (ops->cleanup_data) + ops->cleanup_data(reply_data); + + genlmsg_end(rskb, reply_payload); + netdev_put(req_info->dev, &req_info->dev_tracker); + kfree(reply_data); + kfree(req_info); + return genlmsg_reply(rskb, info); + +err_msg: + WARN_ONCE(ret == -EMSGSIZE, "calculated message payload length (%d) not sufficient\n", reply_len); + 
nlmsg_free(rskb); +err_cleanup: + if (ops->cleanup_data) + ops->cleanup_data(reply_data); +err_dev: + netdev_put(req_info->dev, &req_info->dev_tracker); +err_free: + kfree(reply_data); + kfree(req_info); + return ret; +} + +static int ethnl_default_dump_one(struct sk_buff *skb, struct net_device *dev, + const struct ethnl_dump_ctx *ctx, + const struct genl_info *info) +{ + void *ehdr; + int ret; + + ehdr = genlmsg_put(skb, info->snd_portid, info->snd_seq, + ðtool_genl_family, NLM_F_MULTI, + ctx->ops->reply_cmd); + if (!ehdr) + return -EMSGSIZE; + + ethnl_init_reply_data(ctx->reply_data, ctx->ops, dev); + rtnl_lock(); + netdev_lock_ops(dev); + ret = ctx->ops->prepare_data(ctx->req_info, ctx->reply_data, info); + netdev_unlock_ops(dev); + rtnl_unlock(); + if (ret < 0) + goto out_cancel; + ret = ethnl_fill_reply_header(skb, dev, ctx->ops->hdr_attr); + if (ret < 0) + goto out; + ret = ctx->ops->fill_reply(skb, ctx->req_info, ctx->reply_data); + +out: + if (ctx->ops->cleanup_data) + ctx->ops->cleanup_data(ctx->reply_data); +out_cancel: + ctx->reply_data->dev = NULL; + if (ret < 0) + genlmsg_cancel(skb, ehdr); + else + genlmsg_end(skb, ehdr); + return ret; +} + +/* Default ->dumpit() handler for GET requests. */ +static int ethnl_default_dumpit(struct sk_buff *skb, + struct netlink_callback *cb) +{ + struct ethnl_dump_ctx *ctx = ethnl_dump_context(cb); + struct net *net = sock_net(skb->sk); + netdevice_tracker dev_tracker; + struct net_device *dev; + int ret = 0; + + rcu_read_lock(); + for_each_netdev_dump(net, dev, ctx->pos_ifindex) { + netdev_hold(dev, &dev_tracker, GFP_ATOMIC); + rcu_read_unlock(); + + ret = ethnl_default_dump_one(skb, dev, ctx, genl_info_dump(cb)); + + rcu_read_lock(); + netdev_put(dev, &dev_tracker); + + if (ret < 0 && ret != -EOPNOTSUPP) { + if (likely(skb->len)) + ret = skb->len; + break; + } + ret = 0; + } + rcu_read_unlock(); + + return ret; +} + +/* generic ->start() handler for GET requests */ +static int ethnl_default_start(struct netlink_callback *cb) +{ + const struct genl_dumpit_info *info = genl_dumpit_info(cb); + struct ethnl_dump_ctx *ctx = ethnl_dump_context(cb); + struct ethnl_reply_data *reply_data; + const struct ethnl_request_ops *ops; + struct ethnl_req_info *req_info; + struct genlmsghdr *ghdr; + int ret; + + BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx)); + + ghdr = nlmsg_data(cb->nlh); + ops = ethnl_default_requests[ghdr->cmd]; + if (WARN_ONCE(!ops, "cmd %u has no ethnl_request_ops\n", ghdr->cmd)) + return -EOPNOTSUPP; + req_info = kzalloc(ops->req_info_size, GFP_KERNEL); + if (!req_info) + return -ENOMEM; + reply_data = kmalloc(ops->reply_data_size, GFP_KERNEL); + if (!reply_data) { + ret = -ENOMEM; + goto free_req_info; + } + + ret = ethnl_default_parse(req_info, &info->info, ops, false); + if (ret < 0) + goto free_reply_data; + if (req_info->dev) { + /* We ignore device specification in dump requests but as the + * same parser as for non-dump (doit) requests is used, it + * would take reference to the device if it finds one + */ + netdev_put(req_info->dev, &req_info->dev_tracker); + req_info->dev = NULL; + } + + ctx->ops = ops; + ctx->req_info = req_info; + ctx->reply_data = reply_data; + ctx->pos_ifindex = 0; + + return 0; + +free_reply_data: + kfree(reply_data); +free_req_info: + kfree(req_info); + + return ret; +} + +/* per-PHY ->start() handler for GET requests */ +static int ethnl_perphy_start(struct netlink_callback *cb) +{ + struct ethnl_perphy_dump_ctx *phy_ctx = ethnl_perphy_dump_context(cb); + const struct genl_dumpit_info *info = 
genl_dumpit_info(cb); + struct ethnl_dump_ctx *ctx = &phy_ctx->ethnl_ctx; + struct ethnl_reply_data *reply_data; + const struct ethnl_request_ops *ops; + struct ethnl_req_info *req_info; + struct genlmsghdr *ghdr; + int ret; + + BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx)); + + ghdr = nlmsg_data(cb->nlh); + ops = ethnl_default_requests[ghdr->cmd]; + if (WARN_ONCE(!ops, "cmd %u has no ethnl_request_ops\n", ghdr->cmd)) + return -EOPNOTSUPP; + req_info = kzalloc(ops->req_info_size, GFP_KERNEL); + if (!req_info) + return -ENOMEM; + reply_data = kmalloc(ops->reply_data_size, GFP_KERNEL); + if (!reply_data) { + ret = -ENOMEM; + goto free_req_info; + } + + /* Unlike per-dev dump, don't ignore dev. The dump handler + * will notice it and dump PHYs from given dev. We only keep track of + * the dev's ifindex, .dumpit() will grab and release the netdev itself. + */ + ret = ethnl_default_parse(req_info, &info->info, ops, false); + if (ret < 0) + goto free_reply_data; + if (req_info->dev) { + phy_ctx->ifindex = req_info->dev->ifindex; + netdev_put(req_info->dev, &req_info->dev_tracker); + req_info->dev = NULL; + } + + ctx->ops = ops; + ctx->req_info = req_info; + ctx->reply_data = reply_data; + ctx->pos_ifindex = 0; + + return 0; + +free_reply_data: + kfree(reply_data); +free_req_info: + kfree(req_info); + + return ret; +} + +static int ethnl_perphy_dump_one_dev(struct sk_buff *skb, + struct ethnl_perphy_dump_ctx *ctx, + const struct genl_info *info) +{ + struct ethnl_dump_ctx *ethnl_ctx = &ctx->ethnl_ctx; + struct net_device *dev = ethnl_ctx->req_info->dev; + struct phy_device_node *pdn; + int ret; + + if (!dev->link_topo) + return 0; + + xa_for_each_start(&dev->link_topo->phys, ctx->pos_phyindex, pdn, + ctx->pos_phyindex) { + ethnl_ctx->req_info->phy_index = ctx->pos_phyindex; + + /* We can re-use the original dump_one as ->prepare_data in + * commands use ethnl_req_get_phydev(), which gets the PHY from + * the req_info->phy_index + */ + ret = ethnl_default_dump_one(skb, dev, ethnl_ctx, info); + if (ret) + return ret; + } + + ctx->pos_phyindex = 0; + + return 0; +} + +static int ethnl_perphy_dump_all_dev(struct sk_buff *skb, + struct ethnl_perphy_dump_ctx *ctx, + const struct genl_info *info) +{ + struct ethnl_dump_ctx *ethnl_ctx = &ctx->ethnl_ctx; + struct net *net = sock_net(skb->sk); + netdevice_tracker dev_tracker; + struct net_device *dev; + int ret = 0; + + rcu_read_lock(); + for_each_netdev_dump(net, dev, ethnl_ctx->pos_ifindex) { + netdev_hold(dev, &dev_tracker, GFP_ATOMIC); + rcu_read_unlock(); + + /* per-PHY commands use ethnl_req_get_phydev(), which needs the + * net_device in the req_info + */ + ethnl_ctx->req_info->dev = dev; + ret = ethnl_perphy_dump_one_dev(skb, ctx, info); + + rcu_read_lock(); + netdev_put(dev, &dev_tracker); + ethnl_ctx->req_info->dev = NULL; + + if (ret < 0 && ret != -EOPNOTSUPP) { + if (likely(skb->len)) + ret = skb->len; + break; + } + ret = 0; + } + rcu_read_unlock(); + + return ret; +} + +/* per-PHY ->dumpit() handler for GET requests. 
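+ * If the request header named a specific netdev, only that device's PHY
+ * topology is walked; otherwise all netdevs in the netns are iterated.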
*/ +static int ethnl_perphy_dumpit(struct sk_buff *skb, + struct netlink_callback *cb) +{ + struct ethnl_perphy_dump_ctx *ctx = ethnl_perphy_dump_context(cb); + const struct genl_dumpit_info *info = genl_dumpit_info(cb); + struct ethnl_dump_ctx *ethnl_ctx = &ctx->ethnl_ctx; + int ret = 0; + + if (ctx->ifindex) { + netdevice_tracker dev_tracker; + struct net_device *dev; + + dev = netdev_get_by_index(genl_info_net(&info->info), + ctx->ifindex, &dev_tracker, + GFP_KERNEL); + if (!dev) + return -ENODEV; + + ethnl_ctx->req_info->dev = dev; + ret = ethnl_perphy_dump_one_dev(skb, ctx, genl_info_dump(cb)); + + if (ret < 0 && ret != -EOPNOTSUPP && likely(skb->len)) + ret = skb->len; + + netdev_put(dev, &dev_tracker); + } else { + ret = ethnl_perphy_dump_all_dev(skb, ctx, genl_info_dump(cb)); + } + + return ret; +} + +/* per-PHY ->done() handler for GET requests */ +static int ethnl_perphy_done(struct netlink_callback *cb) +{ + struct ethnl_perphy_dump_ctx *ctx = ethnl_perphy_dump_context(cb); + struct ethnl_dump_ctx *ethnl_ctx = &ctx->ethnl_ctx; + + kfree(ethnl_ctx->reply_data); + kfree(ethnl_ctx->req_info); + + return 0; +} + +/* default ->done() handler for GET requests */ +static int ethnl_default_done(struct netlink_callback *cb) +{ + struct ethnl_dump_ctx *ctx = ethnl_dump_context(cb); + + kfree(ctx->reply_data); + kfree(ctx->req_info); + + return 0; +} + +static int ethnl_default_set_doit(struct sk_buff *skb, struct genl_info *info) +{ + const struct ethnl_request_ops *ops; + const u8 cmd = info->genlhdr->cmd; + struct ethnl_req_info *req_info; + struct net_device *dev; + int ret; + + ops = ethnl_default_requests[cmd]; + if (WARN_ONCE(!ops, "cmd %u has no ethnl_request_ops\n", cmd)) + return -EOPNOTSUPP; + if (GENL_REQ_ATTR_CHECK(info, ops->hdr_attr)) + return -EINVAL; + + req_info = kzalloc(ops->req_info_size, GFP_KERNEL); + if (!req_info) + return -ENOMEM; + + ret = ethnl_default_parse(req_info, info, ops, true); + if (ret < 0) + goto out_free_req; + + if (ops->set_validate) { + ret = ops->set_validate(req_info, info); + /* 0 means nothing to do */ + if (ret <= 0) + goto out_dev; + } + + dev = req_info->dev; + + rtnl_lock(); + netdev_lock_ops(dev); + dev->cfg_pending = kmemdup(dev->cfg, sizeof(*dev->cfg), + GFP_KERNEL_ACCOUNT); + if (!dev->cfg_pending) { + ret = -ENOMEM; + goto out_tie_cfg; + } + + ret = ethnl_ops_begin(dev); + if (ret < 0) + goto out_free_cfg; + + ret = ops->set(req_info, info); + if (ret < 0) + goto out_ops; + + swap(dev->cfg, dev->cfg_pending); + if (!ret) + goto out_ops; + ethnl_notify(dev, ops->set_ntf_cmd, req_info); + + ret = 0; +out_ops: + ethnl_ops_complete(dev); +out_free_cfg: + kfree(dev->cfg_pending); +out_tie_cfg: + dev->cfg_pending = dev->cfg; + netdev_unlock_ops(dev); + rtnl_unlock(); +out_dev: + ethnl_parse_header_dev_put(req_info); +out_free_req: + kfree(req_info); + return ret; +} + +static const struct ethnl_request_ops * +ethnl_default_notify_ops[ETHTOOL_MSG_KERNEL_MAX + 1] = { + [ETHTOOL_MSG_LINKINFO_NTF] = ðnl_linkinfo_request_ops, + [ETHTOOL_MSG_LINKMODES_NTF] = ðnl_linkmodes_request_ops, + [ETHTOOL_MSG_DEBUG_NTF] = ðnl_debug_request_ops, + [ETHTOOL_MSG_WOL_NTF] = ðnl_wol_request_ops, + [ETHTOOL_MSG_FEATURES_NTF] = ðnl_features_request_ops, + [ETHTOOL_MSG_PRIVFLAGS_NTF] = ðnl_privflags_request_ops, + [ETHTOOL_MSG_RINGS_NTF] = ðnl_rings_request_ops, + [ETHTOOL_MSG_CHANNELS_NTF] = ðnl_channels_request_ops, + [ETHTOOL_MSG_COALESCE_NTF] = ðnl_coalesce_request_ops, + [ETHTOOL_MSG_PAUSE_NTF] = ðnl_pause_request_ops, + [ETHTOOL_MSG_EEE_NTF] = 
ðnl_eee_request_ops, + [ETHTOOL_MSG_FEC_NTF] = ðnl_fec_request_ops, + [ETHTOOL_MSG_MODULE_NTF] = ðnl_module_request_ops, + [ETHTOOL_MSG_PLCA_NTF] = ðnl_plca_cfg_request_ops, + [ETHTOOL_MSG_MM_NTF] = ðnl_mm_request_ops, + [ETHTOOL_MSG_RSS_NTF] = ðnl_rss_request_ops, + [ETHTOOL_MSG_RSS_CREATE_NTF] = ðnl_rss_request_ops, +}; + +/* default notification handler */ +static void ethnl_default_notify(struct net_device *dev, unsigned int cmd, + const struct ethnl_req_info *orig_req_info) +{ + struct ethnl_reply_data *reply_data; + const struct ethnl_request_ops *ops; + struct ethnl_req_info *req_info; + struct genl_info info; + struct sk_buff *skb; + void *reply_payload; + int reply_len; + int ret; + + genl_info_init_ntf(&info, ðtool_genl_family, cmd); + + if (WARN_ONCE(cmd > ETHTOOL_MSG_KERNEL_MAX || + !ethnl_default_notify_ops[cmd], + "unexpected notification type %u\n", cmd)) + return; + ops = ethnl_default_notify_ops[cmd]; + req_info = kzalloc(ops->req_info_size, GFP_KERNEL); + if (!req_info) + return; + reply_data = kmalloc(ops->reply_data_size, GFP_KERNEL); + if (!reply_data) { + kfree(req_info); + return; + } + + req_info->dev = dev; + req_info->flags |= ETHTOOL_FLAG_COMPACT_BITSETS; + if (orig_req_info) { + req_info->phy_index = orig_req_info->phy_index; + memcpy(&req_info[1], &orig_req_info[1], + ops->req_info_size - sizeof(*req_info)); + } + + netdev_ops_assert_locked(dev); + + ethnl_init_reply_data(reply_data, ops, dev); + ret = ops->prepare_data(req_info, reply_data, &info); + if (ret < 0) + goto err_rep; + ret = ops->reply_size(req_info, reply_data); + if (ret < 0) + goto err_cleanup; + reply_len = ret + ethnl_reply_header_size(); + skb = genlmsg_new(reply_len, GFP_KERNEL); + if (!skb) + goto err_cleanup; + reply_payload = ethnl_bcastmsg_put(skb, cmd); + if (!reply_payload) + goto err_skb; + ret = ethnl_fill_reply_header(skb, dev, ops->hdr_attr); + if (ret < 0) + goto err_msg; + ret = ops->fill_reply(skb, req_info, reply_data); + if (ret < 0) + goto err_msg; + if (ops->cleanup_data) + ops->cleanup_data(reply_data); + + genlmsg_end(skb, reply_payload); + kfree(reply_data); + kfree(req_info); + ethnl_multicast(skb, dev); + return; + +err_msg: + WARN_ONCE(ret == -EMSGSIZE, + "calculated message payload length (%d) not sufficient\n", + reply_len); +err_skb: + nlmsg_free(skb); +err_cleanup: + if (ops->cleanup_data) + ops->cleanup_data(reply_data); +err_rep: + kfree(reply_data); + kfree(req_info); + return; +} + +/* notifications */ + +typedef void (*ethnl_notify_handler_t)(struct net_device *dev, unsigned int cmd, + const struct ethnl_req_info *req_info); + +static const ethnl_notify_handler_t ethnl_notify_handlers[] = { + [ETHTOOL_MSG_LINKINFO_NTF] = ethnl_default_notify, + [ETHTOOL_MSG_LINKMODES_NTF] = ethnl_default_notify, + [ETHTOOL_MSG_DEBUG_NTF] = ethnl_default_notify, + [ETHTOOL_MSG_WOL_NTF] = ethnl_default_notify, + [ETHTOOL_MSG_FEATURES_NTF] = ethnl_default_notify, + [ETHTOOL_MSG_PRIVFLAGS_NTF] = ethnl_default_notify, + [ETHTOOL_MSG_RINGS_NTF] = ethnl_default_notify, + [ETHTOOL_MSG_CHANNELS_NTF] = ethnl_default_notify, + [ETHTOOL_MSG_COALESCE_NTF] = ethnl_default_notify, + [ETHTOOL_MSG_PAUSE_NTF] = ethnl_default_notify, + [ETHTOOL_MSG_EEE_NTF] = ethnl_default_notify, + [ETHTOOL_MSG_FEC_NTF] = ethnl_default_notify, + [ETHTOOL_MSG_MODULE_NTF] = ethnl_default_notify, + [ETHTOOL_MSG_PLCA_NTF] = ethnl_default_notify, + [ETHTOOL_MSG_MM_NTF] = ethnl_default_notify, + [ETHTOOL_MSG_RSS_NTF] = ethnl_default_notify, + [ETHTOOL_MSG_RSS_CREATE_NTF] = ethnl_default_notify, +}; + +void 
ethnl_notify(struct net_device *dev, unsigned int cmd, + const struct ethnl_req_info *req_info) +{ + if (unlikely(!ethnl_ok)) + return; + ASSERT_RTNL(); + + if (likely(cmd < ARRAY_SIZE(ethnl_notify_handlers) && + ethnl_notify_handlers[cmd])) + ethnl_notify_handlers[cmd](dev, cmd, req_info); + else + WARN_ONCE(1, "notification %u not implemented (dev=%s)\n", + cmd, netdev_name(dev)); +} + +void ethtool_notify(struct net_device *dev, unsigned int cmd) +{ + ethnl_notify(dev, cmd, NULL); +} +EXPORT_SYMBOL(ethtool_notify); + +static void ethnl_notify_features(struct netdev_notifier_info *info) +{ + struct net_device *dev = netdev_notifier_info_to_dev(info); + + ethtool_notify(dev, ETHTOOL_MSG_FEATURES_NTF); +} + +static int ethnl_netdev_event(struct notifier_block *this, unsigned long event, + void *ptr) +{ + struct netdev_notifier_info *info = ptr; + struct netlink_ext_ack *extack; + struct net_device *dev; + + dev = netdev_notifier_info_to_dev(info); + extack = netdev_notifier_info_to_extack(info); + + switch (event) { + case NETDEV_FEAT_CHANGE: + ethnl_notify_features(ptr); + break; + case NETDEV_PRE_UP: + if (dev->ethtool->module_fw_flash_in_progress) { + NL_SET_ERR_MSG(extack, "Can't set port up while flashing module firmware"); + return NOTIFY_BAD; + } + } + + return NOTIFY_DONE; +} + +static struct notifier_block ethnl_netdev_notifier = { + .notifier_call = ethnl_netdev_event, +}; + +/* genetlink setup */ + +static const struct genl_ops ethtool_genl_ops[] = { + { + .cmd = ETHTOOL_MSG_STRSET_GET, + .doit = ethnl_default_doit, + .start = ethnl_default_start, + .dumpit = ethnl_default_dumpit, + .done = ethnl_default_done, + .policy = ethnl_strset_get_policy, + .maxattr = ARRAY_SIZE(ethnl_strset_get_policy) - 1, + }, + { + .cmd = ETHTOOL_MSG_LINKINFO_GET, + .doit = ethnl_default_doit, + .start = ethnl_default_start, + .dumpit = ethnl_default_dumpit, + .done = ethnl_default_done, + .policy = ethnl_linkinfo_get_policy, + .maxattr = ARRAY_SIZE(ethnl_linkinfo_get_policy) - 1, + }, + { + .cmd = ETHTOOL_MSG_LINKINFO_SET, + .flags = GENL_UNS_ADMIN_PERM, + .doit = ethnl_default_set_doit, + .policy = ethnl_linkinfo_set_policy, + .maxattr = ARRAY_SIZE(ethnl_linkinfo_set_policy) - 1, + }, + { + .cmd = ETHTOOL_MSG_LINKMODES_GET, + .doit = ethnl_default_doit, + .start = ethnl_default_start, + .dumpit = ethnl_default_dumpit, + .done = ethnl_default_done, + .policy = ethnl_linkmodes_get_policy, + .maxattr = ARRAY_SIZE(ethnl_linkmodes_get_policy) - 1, + }, + { + .cmd = ETHTOOL_MSG_LINKMODES_SET, + .flags = GENL_UNS_ADMIN_PERM, + .doit = ethnl_default_set_doit, + .policy = ethnl_linkmodes_set_policy, + .maxattr = ARRAY_SIZE(ethnl_linkmodes_set_policy) - 1, + }, + { + .cmd = ETHTOOL_MSG_LINKSTATE_GET, + .doit = ethnl_default_doit, + .start = ethnl_default_start, + .dumpit = ethnl_default_dumpit, + .done = ethnl_default_done, + .policy = ethnl_linkstate_get_policy, + .maxattr = ARRAY_SIZE(ethnl_linkstate_get_policy) - 1, + }, + { + .cmd = ETHTOOL_MSG_DEBUG_GET, + .doit = ethnl_default_doit, + .start = ethnl_default_start, + .dumpit = ethnl_default_dumpit, + .done = ethnl_default_done, + .policy = ethnl_debug_get_policy, + .maxattr = ARRAY_SIZE(ethnl_debug_get_policy) - 1, + }, + { + .cmd = ETHTOOL_MSG_DEBUG_SET, + .flags = GENL_UNS_ADMIN_PERM, + .doit = ethnl_default_set_doit, + .policy = ethnl_debug_set_policy, + .maxattr = ARRAY_SIZE(ethnl_debug_set_policy) - 1, + }, + { + .cmd = ETHTOOL_MSG_WOL_GET, + .flags = GENL_UNS_ADMIN_PERM, + .doit = ethnl_default_doit, + .start = ethnl_default_start, + .dumpit = 
ethnl_default_dumpit, + .done = ethnl_default_done, + .policy = ethnl_wol_get_policy, + .maxattr = ARRAY_SIZE(ethnl_wol_get_policy) - 1, + }, + { + .cmd = ETHTOOL_MSG_WOL_SET, + .flags = GENL_UNS_ADMIN_PERM, + .doit = ethnl_default_set_doit, + .policy = ethnl_wol_set_policy, + .maxattr = ARRAY_SIZE(ethnl_wol_set_policy) - 1, + }, + { + .cmd = ETHTOOL_MSG_FEATURES_GET, + .doit = ethnl_default_doit, + .start = ethnl_default_start, + .dumpit = ethnl_default_dumpit, + .done = ethnl_default_done, + .policy = ethnl_features_get_policy, + .maxattr = ARRAY_SIZE(ethnl_features_get_policy) - 1, + }, + { + .cmd = ETHTOOL_MSG_FEATURES_SET, + .flags = GENL_UNS_ADMIN_PERM, + .doit = ethnl_set_features, + .policy = ethnl_features_set_policy, + .maxattr = ARRAY_SIZE(ethnl_features_set_policy) - 1, + }, + { + .cmd = ETHTOOL_MSG_PRIVFLAGS_GET, + .doit = ethnl_default_doit, + .start = ethnl_default_start, + .dumpit = ethnl_default_dumpit, + .done = ethnl_default_done, + .policy = ethnl_privflags_get_policy, + .maxattr = ARRAY_SIZE(ethnl_privflags_get_policy) - 1, + }, + { + .cmd = ETHTOOL_MSG_PRIVFLAGS_SET, + .flags = GENL_UNS_ADMIN_PERM, + .doit = ethnl_default_set_doit, + .policy = ethnl_privflags_set_policy, + .maxattr = ARRAY_SIZE(ethnl_privflags_set_policy) - 1, + }, + { + .cmd = ETHTOOL_MSG_RINGS_GET, + .doit = ethnl_default_doit, + .start = ethnl_default_start, + .dumpit = ethnl_default_dumpit, + .done = ethnl_default_done, + .policy = ethnl_rings_get_policy, + .maxattr = ARRAY_SIZE(ethnl_rings_get_policy) - 1, + }, + { + .cmd = ETHTOOL_MSG_RINGS_SET, + .flags = GENL_UNS_ADMIN_PERM, + .doit = ethnl_default_set_doit, + .policy = ethnl_rings_set_policy, + .maxattr = ARRAY_SIZE(ethnl_rings_set_policy) - 1, + }, + { + .cmd = ETHTOOL_MSG_CHANNELS_GET, + .doit = ethnl_default_doit, + .start = ethnl_default_start, + .dumpit = ethnl_default_dumpit, + .done = ethnl_default_done, + .policy = ethnl_channels_get_policy, + .maxattr = ARRAY_SIZE(ethnl_channels_get_policy) - 1, + }, + { + .cmd = ETHTOOL_MSG_CHANNELS_SET, + .flags = GENL_UNS_ADMIN_PERM, + .doit = ethnl_default_set_doit, + .policy = ethnl_channels_set_policy, + .maxattr = ARRAY_SIZE(ethnl_channels_set_policy) - 1, + }, + { + .cmd = ETHTOOL_MSG_COALESCE_GET, + .doit = ethnl_default_doit, + .start = ethnl_default_start, + .dumpit = ethnl_default_dumpit, + .done = ethnl_default_done, + .policy = ethnl_coalesce_get_policy, + .maxattr = ARRAY_SIZE(ethnl_coalesce_get_policy) - 1, + }, + { + .cmd = ETHTOOL_MSG_COALESCE_SET, + .flags = GENL_UNS_ADMIN_PERM, + .doit = ethnl_default_set_doit, + .policy = ethnl_coalesce_set_policy, + .maxattr = ARRAY_SIZE(ethnl_coalesce_set_policy) - 1, + }, + { + .cmd = ETHTOOL_MSG_PAUSE_GET, + .doit = ethnl_default_doit, + .start = ethnl_default_start, + .dumpit = ethnl_default_dumpit, + .done = ethnl_default_done, + .policy = ethnl_pause_get_policy, + .maxattr = ARRAY_SIZE(ethnl_pause_get_policy) - 1, + }, + { + .cmd = ETHTOOL_MSG_PAUSE_SET, + .flags = GENL_UNS_ADMIN_PERM, + .doit = ethnl_default_set_doit, + .policy = ethnl_pause_set_policy, + .maxattr = ARRAY_SIZE(ethnl_pause_set_policy) - 1, + }, + { + .cmd = ETHTOOL_MSG_EEE_GET, + .doit = ethnl_default_doit, + .start = ethnl_default_start, + .dumpit = ethnl_default_dumpit, + .done = ethnl_default_done, + .policy = ethnl_eee_get_policy, + .maxattr = ARRAY_SIZE(ethnl_eee_get_policy) - 1, + }, + { + .cmd = ETHTOOL_MSG_EEE_SET, + .flags = GENL_UNS_ADMIN_PERM, + .doit = ethnl_default_set_doit, + .policy = ethnl_eee_set_policy, + .maxattr = ARRAY_SIZE(ethnl_eee_set_policy) - 1, + 
}, + { + .cmd = ETHTOOL_MSG_TSINFO_GET, + .doit = ethnl_default_doit, + .start = ethnl_tsinfo_start, + .dumpit = ethnl_tsinfo_dumpit, + .done = ethnl_tsinfo_done, + .policy = ethnl_tsinfo_get_policy, + .maxattr = ARRAY_SIZE(ethnl_tsinfo_get_policy) - 1, + }, + { + .cmd = ETHTOOL_MSG_CABLE_TEST_ACT, + .flags = GENL_UNS_ADMIN_PERM, + .doit = ethnl_act_cable_test, + .policy = ethnl_cable_test_act_policy, + .maxattr = ARRAY_SIZE(ethnl_cable_test_act_policy) - 1, + }, + { + .cmd = ETHTOOL_MSG_CABLE_TEST_TDR_ACT, + .flags = GENL_UNS_ADMIN_PERM, + .doit = ethnl_act_cable_test_tdr, + .policy = ethnl_cable_test_tdr_act_policy, + .maxattr = ARRAY_SIZE(ethnl_cable_test_tdr_act_policy) - 1, + }, + { + .cmd = ETHTOOL_MSG_TUNNEL_INFO_GET, + .doit = ethnl_tunnel_info_doit, + .start = ethnl_tunnel_info_start, + .dumpit = ethnl_tunnel_info_dumpit, + .policy = ethnl_tunnel_info_get_policy, + .maxattr = ARRAY_SIZE(ethnl_tunnel_info_get_policy) - 1, + }, + { + .cmd = ETHTOOL_MSG_FEC_GET, + .doit = ethnl_default_doit, + .start = ethnl_default_start, + .dumpit = ethnl_default_dumpit, + .done = ethnl_default_done, + .policy = ethnl_fec_get_policy, + .maxattr = ARRAY_SIZE(ethnl_fec_get_policy) - 1, + }, + { + .cmd = ETHTOOL_MSG_FEC_SET, + .flags = GENL_UNS_ADMIN_PERM, + .doit = ethnl_default_set_doit, + .policy = ethnl_fec_set_policy, + .maxattr = ARRAY_SIZE(ethnl_fec_set_policy) - 1, + }, + { + .cmd = ETHTOOL_MSG_MODULE_EEPROM_GET, + .flags = GENL_UNS_ADMIN_PERM, + .doit = ethnl_default_doit, + .start = ethnl_default_start, + .dumpit = ethnl_default_dumpit, + .done = ethnl_default_done, + .policy = ethnl_module_eeprom_get_policy, + .maxattr = ARRAY_SIZE(ethnl_module_eeprom_get_policy) - 1, + }, + { + .cmd = ETHTOOL_MSG_STATS_GET, + .doit = ethnl_default_doit, + .start = ethnl_default_start, + .dumpit = ethnl_default_dumpit, + .done = ethnl_default_done, + .policy = ethnl_stats_get_policy, + .maxattr = ARRAY_SIZE(ethnl_stats_get_policy) - 1, + }, + { + .cmd = ETHTOOL_MSG_PHC_VCLOCKS_GET, + .doit = ethnl_default_doit, + .start = ethnl_default_start, + .dumpit = ethnl_default_dumpit, + .done = ethnl_default_done, + .policy = ethnl_phc_vclocks_get_policy, + .maxattr = ARRAY_SIZE(ethnl_phc_vclocks_get_policy) - 1, + }, + { + .cmd = ETHTOOL_MSG_MODULE_GET, + .doit = ethnl_default_doit, + .start = ethnl_default_start, + .dumpit = ethnl_default_dumpit, + .done = ethnl_default_done, + .policy = ethnl_module_get_policy, + .maxattr = ARRAY_SIZE(ethnl_module_get_policy) - 1, + }, + { + .cmd = ETHTOOL_MSG_MODULE_SET, + .flags = GENL_UNS_ADMIN_PERM, + .doit = ethnl_default_set_doit, + .policy = ethnl_module_set_policy, + .maxattr = ARRAY_SIZE(ethnl_module_set_policy) - 1, + }, + { + .cmd = ETHTOOL_MSG_PSE_GET, + .doit = ethnl_default_doit, + .start = ethnl_perphy_start, + .dumpit = ethnl_perphy_dumpit, + .done = ethnl_perphy_done, + .policy = ethnl_pse_get_policy, + .maxattr = ARRAY_SIZE(ethnl_pse_get_policy) - 1, + }, + { + .cmd = ETHTOOL_MSG_PSE_SET, + .flags = GENL_UNS_ADMIN_PERM, + .doit = ethnl_default_set_doit, + .policy = ethnl_pse_set_policy, + .maxattr = ARRAY_SIZE(ethnl_pse_set_policy) - 1, + }, + { + .cmd = ETHTOOL_MSG_RSS_GET, + .doit = ethnl_default_doit, + .start = ethnl_rss_dump_start, + .dumpit = ethnl_rss_dumpit, + .policy = ethnl_rss_get_policy, + .maxattr = ARRAY_SIZE(ethnl_rss_get_policy) - 1, + }, + { + .cmd = ETHTOOL_MSG_PLCA_GET_CFG, + .doit = ethnl_default_doit, + .start = ethnl_perphy_start, + .dumpit = ethnl_perphy_dumpit, + .done = ethnl_perphy_done, + .policy = ethnl_plca_get_cfg_policy, + .maxattr 
= ARRAY_SIZE(ethnl_plca_get_cfg_policy) - 1, + }, + { + .cmd = ETHTOOL_MSG_PLCA_SET_CFG, + .flags = GENL_UNS_ADMIN_PERM, + .doit = ethnl_default_set_doit, + .policy = ethnl_plca_set_cfg_policy, + .maxattr = ARRAY_SIZE(ethnl_plca_set_cfg_policy) - 1, + }, + { + .cmd = ETHTOOL_MSG_PLCA_GET_STATUS, + .doit = ethnl_default_doit, + .start = ethnl_perphy_start, + .dumpit = ethnl_perphy_dumpit, + .done = ethnl_perphy_done, + .policy = ethnl_plca_get_status_policy, + .maxattr = ARRAY_SIZE(ethnl_plca_get_status_policy) - 1, + }, + { + .cmd = ETHTOOL_MSG_MM_GET, + .doit = ethnl_default_doit, + .start = ethnl_default_start, + .dumpit = ethnl_default_dumpit, + .done = ethnl_default_done, + .policy = ethnl_mm_get_policy, + .maxattr = ARRAY_SIZE(ethnl_mm_get_policy) - 1, + }, + { + .cmd = ETHTOOL_MSG_MM_SET, + .flags = GENL_UNS_ADMIN_PERM, + .doit = ethnl_default_set_doit, + .policy = ethnl_mm_set_policy, + .maxattr = ARRAY_SIZE(ethnl_mm_set_policy) - 1, + }, + { + .cmd = ETHTOOL_MSG_MODULE_FW_FLASH_ACT, + .flags = GENL_UNS_ADMIN_PERM, + .doit = ethnl_act_module_fw_flash, + .policy = ethnl_module_fw_flash_act_policy, + .maxattr = ARRAY_SIZE(ethnl_module_fw_flash_act_policy) - 1, + }, + { + .cmd = ETHTOOL_MSG_PHY_GET, + .doit = ethnl_default_doit, + .start = ethnl_perphy_start, + .dumpit = ethnl_perphy_dumpit, + .done = ethnl_perphy_done, + .policy = ethnl_phy_get_policy, + .maxattr = ARRAY_SIZE(ethnl_phy_get_policy) - 1, + }, + { + .cmd = ETHTOOL_MSG_TSCONFIG_GET, + .doit = ethnl_default_doit, + .start = ethnl_default_start, + .dumpit = ethnl_default_dumpit, + .done = ethnl_default_done, + .policy = ethnl_tsconfig_get_policy, + .maxattr = ARRAY_SIZE(ethnl_tsconfig_get_policy) - 1, + }, + { + .cmd = ETHTOOL_MSG_TSCONFIG_SET, + .flags = GENL_UNS_ADMIN_PERM, + .doit = ethnl_default_set_doit, + .policy = ethnl_tsconfig_set_policy, + .maxattr = ARRAY_SIZE(ethnl_tsconfig_set_policy) - 1, + }, + { + .cmd = ETHTOOL_MSG_RSS_SET, + .flags = GENL_UNS_ADMIN_PERM, + .doit = ethnl_default_set_doit, + .policy = ethnl_rss_set_policy, + .maxattr = ARRAY_SIZE(ethnl_rss_set_policy) - 1, + }, + { + .cmd = ETHTOOL_MSG_RSS_CREATE_ACT, + .flags = GENL_UNS_ADMIN_PERM, + .doit = ethnl_rss_create_doit, + .policy = ethnl_rss_create_policy, + .maxattr = ARRAY_SIZE(ethnl_rss_create_policy) - 1, + }, + { + .cmd = ETHTOOL_MSG_RSS_DELETE_ACT, + .flags = GENL_UNS_ADMIN_PERM, + .doit = ethnl_rss_delete_doit, + .policy = ethnl_rss_delete_policy, + .maxattr = ARRAY_SIZE(ethnl_rss_delete_policy) - 1, + }, + { + .cmd = ETHTOOL_MSG_MSE_GET, + .doit = ethnl_default_doit, + .start = ethnl_perphy_start, + .dumpit = ethnl_perphy_dumpit, + .done = ethnl_perphy_done, + .policy = ethnl_mse_get_policy, + .maxattr = ARRAY_SIZE(ethnl_mse_get_policy) - 1, + }, +}; + +static const struct genl_multicast_group ethtool_nl_mcgrps[] = { + [ETHNL_MCGRP_MONITOR] = { .name = ETHTOOL_MCGRP_MONITOR_NAME }, +}; + +static struct genl_family ethtool_genl_family __ro_after_init = { + .name = ETHTOOL_GENL_NAME, + .version = ETHTOOL_GENL_VERSION, + .netnsok = true, + .parallel_ops = true, + .ops = ethtool_genl_ops, + .n_ops = ARRAY_SIZE(ethtool_genl_ops), + .resv_start_op = ETHTOOL_MSG_MODULE_GET + 1, + .mcgrps = ethtool_nl_mcgrps, + .n_mcgrps = ARRAY_SIZE(ethtool_nl_mcgrps), + .sock_priv_size = sizeof(struct ethnl_sock_priv), + .sock_priv_destroy = ethnl_sock_priv_destroy, +}; + +/* module setup */ + +static int __init ethnl_init(void) +{ + int ret; + + ret = genl_register_family(ðtool_genl_family); + if (WARN(ret < 0, "ethtool: genetlink family registration failed")) 
+ return ret; + ethnl_ok = true; + + ret = register_netdevice_notifier(ðnl_netdev_notifier); + WARN(ret < 0, "ethtool: net device notifier registration failed"); + return ret; +} + +subsys_initcall(ethnl_init); diff --git a/net/ethtool/netlink.h b/net/ethtool/netlink.h new file mode 100644 index 000000000000..89010eaa67df --- /dev/null +++ b/net/ethtool/netlink.h @@ -0,0 +1,525 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef _NET_ETHTOOL_NETLINK_H +#define _NET_ETHTOOL_NETLINK_H + +#include <linux/ethtool_netlink.h> +#include <linux/netdevice.h> +#include <net/genetlink.h> +#include <net/sock.h> + +struct ethnl_req_info; + +u32 ethnl_bcast_seq_next(void); +int ethnl_parse_header_dev_get(struct ethnl_req_info *req_info, + const struct nlattr *nest, struct net *net, + struct netlink_ext_ack *extack, + bool require_dev); +int ethnl_fill_reply_header(struct sk_buff *skb, struct net_device *dev, + u16 attrtype); +struct sk_buff *ethnl_reply_init(size_t payload, struct net_device *dev, u8 cmd, + u16 hdr_attrtype, struct genl_info *info, + void **ehdrp); +void *ethnl_dump_put(struct sk_buff *skb, struct netlink_callback *cb, u8 cmd); +void *ethnl_bcastmsg_put(struct sk_buff *skb, u8 cmd); +void *ethnl_unicast_put(struct sk_buff *skb, u32 portid, u32 seq, u8 cmd); +int ethnl_multicast(struct sk_buff *skb, struct net_device *dev); +void ethnl_notify(struct net_device *dev, unsigned int cmd, + const struct ethnl_req_info *req_info); + +/** + * ethnl_strz_size() - calculate attribute length for fixed size string + * @s: ETH_GSTRING_LEN sized string (may not be null terminated) + * + * Return: total length of an attribute with null terminated string from @s + */ +static inline int ethnl_strz_size(const char *s) +{ + return nla_total_size(strnlen(s, ETH_GSTRING_LEN) + 1); +} + +/** + * ethnl_put_strz() - put string attribute with fixed size string + * @skb: skb with the message + * @attrtype: attribute type + * @s: ETH_GSTRING_LEN sized string (may not be null terminated) + * + * Puts an attribute with null terminated string from @s into the message. + * + * Return: 0 on success, negative error code on failure + */ +static inline int ethnl_put_strz(struct sk_buff *skb, u16 attrtype, + const char *s) +{ + unsigned int len = strnlen(s, ETH_GSTRING_LEN); + struct nlattr *attr; + + attr = nla_reserve(skb, attrtype, len + 1); + if (!attr) + return -EMSGSIZE; + + memcpy(nla_data(attr), s, len); + ((char *)nla_data(attr))[len] = '\0'; + return 0; +} + +/** + * ethnl_update_u32() - update u32 value from NLA_U32 attribute + * @dst: value to update + * @attr: netlink attribute with new value or null + * @mod: pointer to bool for modification tracking + * + * Copy the u32 value from NLA_U32 netlink attribute @attr into variable + * pointed to by @dst; do nothing if @attr is null. Bool pointed to by @mod + * is set to true if this function changed the value of *dst, otherwise it + * is left as is. + */ +static inline void ethnl_update_u32(u32 *dst, const struct nlattr *attr, + bool *mod) +{ + u32 val; + + if (!attr) + return; + val = nla_get_u32(attr); + if (*dst == val) + return; + + *dst = val; + *mod = true; +} + +/** + * ethnl_update_u8() - update u8 value from NLA_U8 attribute + * @dst: value to update + * @attr: netlink attribute with new value or null + * @mod: pointer to bool for modification tracking + * + * Copy the u8 value from NLA_U8 netlink attribute @attr into variable + * pointed to by @dst; do nothing if @attr is null. 
Bool pointed to by @mod
+ * is set to true if this function changed the value of *dst, otherwise it
+ * is left as is.
+ */
+static inline void ethnl_update_u8(u8 *dst, const struct nlattr *attr,
+ bool *mod)
+{
+ u8 val;
+
+ if (!attr)
+ return;
+ val = nla_get_u8(attr);
+ if (*dst == val)
+ return;
+
+ *dst = val;
+ *mod = true;
+}
+
+/**
+ * ethnl_update_bool32() - update u32 used as bool from NLA_U8 attribute
+ * @dst: value to update
+ * @attr: netlink attribute with new value or null
+ * @mod: pointer to bool for modification tracking
+ *
+ * Use the u8 value from NLA_U8 netlink attribute @attr to set u32 variable
+ * pointed to by @dst to 0 (if zero) or 1 (if not); do nothing if @attr is
+ * null. Bool pointed to by @mod is set to true if this function changed the
+ * logical value of *dst, otherwise it is left as is.
+ */
+static inline void ethnl_update_bool32(u32 *dst, const struct nlattr *attr,
+ bool *mod)
+{
+ u8 val;
+
+ if (!attr)
+ return;
+ val = !!nla_get_u8(attr);
+ if (!!*dst == val)
+ return;
+
+ *dst = val;
+ *mod = true;
+}
+
+/**
+ * ethnl_update_bool() - update bool from NLA_U8 attribute
+ * @dst: value to update
+ * @attr: netlink attribute with new value or null
+ * @mod: pointer to bool for modification tracking
+ *
+ * Use the u8 value from NLA_U8 netlink attribute @attr to set bool variable
+ * pointed to by @dst to false (if zero) or true (if not); do nothing if @attr
+ * is null. Bool pointed to by @mod is set to true if this function changed the
+ * logical value of *dst, otherwise it is left as is.
+ */
+static inline void ethnl_update_bool(bool *dst, const struct nlattr *attr,
+ bool *mod)
+{
+ u8 val;
+
+ if (!attr)
+ return;
+ val = !!nla_get_u8(attr);
+ if (!!*dst == val)
+ return;
+
+ *dst = val;
+ *mod = true;
+}
+
+/**
+ * ethnl_update_binary() - update binary data from NLA_BINARY attribute
+ * @dst: value to update
+ * @len: destination buffer length
+ * @attr: netlink attribute with new value or null
+ * @mod: pointer to bool for modification tracking
+ *
+ * Use the payload of NLA_BINARY netlink attribute @attr to rewrite the data
+ * block of length @len at @dst; do nothing if @attr is null. Bool pointed to
+ * by @mod is set to true if this function changed the contents of *dst,
+ * otherwise it is left as is.
+ */
+static inline void ethnl_update_binary(void *dst, unsigned int len,
+ const struct nlattr *attr, bool *mod)
+{
+ if (!attr)
+ return;
+ if (nla_len(attr) < len)
+ len = nla_len(attr);
+ if (!memcmp(dst, nla_data(attr), len))
+ return;
+
+ memcpy(dst, nla_data(attr), len);
+ *mod = true;
+}
+
+/**
+ * ethnl_update_bitfield32() - update u32 value from NLA_BITFIELD32 attribute
+ * @dst: value to update
+ * @attr: netlink attribute with new value or null
+ * @mod: pointer to bool for modification tracking
+ *
+ * Update bits in u32 value which are set in attribute's mask to values from
+ * attribute's value. Do nothing if @attr is null or the value wouldn't change;
+ * otherwise, set bool pointed to by @mod to true.
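+ * As an illustration (arbitrary values): with *dst == 0x5, selector == 0x3 and
+ * value == 0x2, the stored result is (0x5 & ~0x3) | (0x2 & 0x3) == 0x6.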
+ */
+static inline void ethnl_update_bitfield32(u32 *dst, const struct nlattr *attr,
+ bool *mod)
+{
+ struct nla_bitfield32 change;
+ u32 newval;
+
+ if (!attr)
+ return;
+ change = nla_get_bitfield32(attr);
+ newval = (*dst & ~change.selector) | (change.value & change.selector);
+ if (*dst == newval)
+ return;
+
+ *dst = newval;
+ *mod = true;
+}
+
+/**
+ * ethnl_reply_header_size() - total size of reply header
+ *
+ * This is an upper estimate so that we do not need to hold RTNL lock longer
+ * than necessary (to prevent rename between size estimate and composing the
+ * message). Accounts only for device ifindex and name as those are the only
+ * attributes ethnl_fill_reply_header() puts into the reply header.
+ */
+static inline unsigned int ethnl_reply_header_size(void)
+{
+ return nla_total_size(nla_total_size(sizeof(u32)) +
+ nla_total_size(IFNAMSIZ));
+}
+
+/* GET request handling */
+
+/* Unified processing of GET requests uses two data structures: request info
+ * and reply data. Request info holds information parsed from the client
+ * request and it stays constant through all request processing. Reply data
+ * holds data retrieved from ethtool_ops callbacks or other internal sources
+ * which is used to compose the reply. When processing a dump request, request
+ * info is filled only once (when the request message is parsed) but reply data
+ * is filled for each reply message.
+ *
+ * Both structures consist of a part common for all request types (struct
+ * ethnl_req_info and struct ethnl_reply_data defined below) and optional
+ * parts specific for each request type. The common part always starts at
+ * offset 0.
+ */
+
+/**
+ * struct ethnl_req_info - base type of request information for GET requests
+ * @dev: network device the request is for (may be null)
+ * @dev_tracker: refcount tracker for @dev reference
+ * @flags: request flags common for all request types
+ * @phy_index: phy_device index connected to @dev this request is for. Can be
+ * 0 if the request doesn't target a phy, or if the @dev's attached
+ * phy is targeted.
+ *
+ * This is a common base for request specific structures holding data from
+ * parsed userspace request. These always embed struct ethnl_req_info at
+ * zero offset.
+ */
+struct ethnl_req_info {
+ struct net_device *dev;
+ netdevice_tracker dev_tracker;
+ u32 flags;
+ u32 phy_index;
+};
+
+static inline void ethnl_parse_header_dev_put(struct ethnl_req_info *req_info)
+{
+ netdev_put(req_info->dev, &req_info->dev_tracker);
+}
+
+/**
+ * ethnl_req_get_phydev() - Gets the phy_device targeted by this request,
+ * if any. Must be called under rtnl_lock().
+ * @req_info: The ethnl request to get the phy from.
+ * @tb: The netlink attributes array, for error reporting.
+ * @header: The netlink header index, used for error reporting.
+ * @extack: The netlink extended ACK, for error reporting.
+ *
+ * The caller must hold RTNL, until it's done interacting with the returned
+ * phy_device.
+ *
+ * Return: The phy_device corresponding to the passed phy_index, if one is
+ * provided. If not, the phy_device attached to the net_device
+ * targeted by this request is returned. If there's no targeted
+ * net_device, or no phy_device is attached, NULL is returned. If the
+ * provided phy_index is invalid, an error pointer is returned.
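+ * Callers are expected to check the result with IS_ERR() before use, as an
+ * invalid phy_index yields ERR_PTR(-ENODEV) rather than NULL.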
+ */
+struct phy_device *ethnl_req_get_phydev(const struct ethnl_req_info *req_info,
+ struct nlattr **tb, unsigned int header,
+ struct netlink_ext_ack *extack);
+
+/**
+ * struct ethnl_reply_data - base type of reply data for GET requests
+ * @dev: device for current reply message; in single shot requests it is
+ * equal to &ethnl_req_info.dev; in dumps it's different for each
+ * reply message
+ *
+ * This is a common base for request specific structures holding data for
+ * kernel reply message. These always embed struct ethnl_reply_data at zero
+ * offset.
+ */
+struct ethnl_reply_data {
+ struct net_device *dev;
+};
+
+int ethnl_ops_begin(struct net_device *dev);
+void ethnl_ops_complete(struct net_device *dev);
+
+enum ethnl_sock_type {
+ ETHTOOL_SOCK_TYPE_MODULE_FW_FLASH,
+};
+
+struct ethnl_sock_priv {
+ struct net_device *dev;
+ u32 portid;
+ enum ethnl_sock_type type;
+};
+
+int ethnl_sock_priv_set(struct sk_buff *skb, struct net_device *dev, u32 portid,
+ enum ethnl_sock_type type);
+
+/**
+ * struct ethnl_request_ops - unified handling of GET and SET requests
+ * @request_cmd: command id for request (GET)
+ * @reply_cmd: command id for reply (GET_REPLY)
+ * @hdr_attr: attribute type for request header
+ * @req_info_size: size of request info
+ * @reply_data_size: size of reply data
+ * @allow_nodev_do: allow non-dump request with no device identification
+ * @set_ntf_cmd: notification to generate on changes (SET)
+ * @parse_request:
+ * Parse request except common header (struct ethnl_req_info). Common
+ * header is already filled on entry, the rest of the request info (up to
+ * @req_info_size) is zero initialized. This callback should only modify
+ * type specific request info by parsed attributes from request message.
+ * Called for both GET and SET. Information parsed for SET will
+ * be conveyed to the req_info used during NTF generation.
+ * @prepare_data:
+ * Retrieve and prepare data needed to compose a reply message. Calls to
+ * ethtool_ops handlers are limited to this callback. Common reply data
+ * (struct ethnl_reply_data) is filled on entry, type specific part after
+ * it is zero initialized. This callback should only modify the type
+ * specific part of reply data. Device identification from struct
+ * ethnl_reply_data is to be used, as dump requests iterate through
+ * network devices while the dev member of struct ethnl_req_info points
+ * to the device from the client request.
+ * @reply_size:
+ * Estimate reply message size. Returned value must be sufficient for
+ * message payload without common reply header. The callback may return
+ * an estimate higher than the actual message size if an exact calculation
+ * would not be worth the saved memory space.
+ * @fill_reply:
+ * Fill reply message payload (except for common header) from reply data.
+ * The callback must not generate more payload than previously called
+ * ->reply_size() estimated.
+ * @cleanup_data:
+ * Optional cleanup called when reply data is no longer needed. Can be
+ * used e.g. to free any additional data structures outside the main
+ * structure which were allocated by ->prepare_data(). When processing
+ * dump requests, ->cleanup() is called for each message.
+ * @set_validate:
+ * Check if set operation is supported for a given device, and perform
+ * extra input checks. Expected return values:
+ * - 0 if the operation is a noop for the device (rare)
+ * - 1 if operation should proceed to calling @set
+ * - negative errno on errors
+ * Called without any locks, just a reference on the netdev.
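+ * ethnl_default_set_doit() takes RTNL, duplicates dev->cfg and calls @set
+ * only when this callback returns 1.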
+ * @set: + * Execute the set operation. The implementation should return + * - 0 if no configuration has changed + * - 1 if configuration changed and notification should be generated + * - negative errno on errors + * + * Description of variable parts of GET request handling when using the + * unified infrastructure. When used, a pointer to an instance of this + * structure is to be added to ðnl_default_requests array and generic + * handlers ethnl_default_doit(), ethnl_default_dumpit(), + * ethnl_default_start() and ethnl_default_done() used in @ethtool_genl_ops; + * ethnl_default_notify() can be used in @ethnl_notify_handlers to send + * notifications of the corresponding type. + */ +struct ethnl_request_ops { + u8 request_cmd; + u8 reply_cmd; + u16 hdr_attr; + unsigned int req_info_size; + unsigned int reply_data_size; + bool allow_nodev_do; + u8 set_ntf_cmd; + + int (*parse_request)(struct ethnl_req_info *req_info, + struct nlattr **tb, + struct netlink_ext_ack *extack); + int (*prepare_data)(const struct ethnl_req_info *req_info, + struct ethnl_reply_data *reply_data, + const struct genl_info *info); + int (*reply_size)(const struct ethnl_req_info *req_info, + const struct ethnl_reply_data *reply_data); + int (*fill_reply)(struct sk_buff *skb, + const struct ethnl_req_info *req_info, + const struct ethnl_reply_data *reply_data); + void (*cleanup_data)(struct ethnl_reply_data *reply_data); + + int (*set_validate)(struct ethnl_req_info *req_info, + struct genl_info *info); + int (*set)(struct ethnl_req_info *req_info, + struct genl_info *info); +}; + +/* request handlers */ + +extern const struct ethnl_request_ops ethnl_strset_request_ops; +extern const struct ethnl_request_ops ethnl_linkinfo_request_ops; +extern const struct ethnl_request_ops ethnl_linkmodes_request_ops; +extern const struct ethnl_request_ops ethnl_linkstate_request_ops; +extern const struct ethnl_request_ops ethnl_debug_request_ops; +extern const struct ethnl_request_ops ethnl_wol_request_ops; +extern const struct ethnl_request_ops ethnl_features_request_ops; +extern const struct ethnl_request_ops ethnl_privflags_request_ops; +extern const struct ethnl_request_ops ethnl_rings_request_ops; +extern const struct ethnl_request_ops ethnl_channels_request_ops; +extern const struct ethnl_request_ops ethnl_coalesce_request_ops; +extern const struct ethnl_request_ops ethnl_pause_request_ops; +extern const struct ethnl_request_ops ethnl_eee_request_ops; +extern const struct ethnl_request_ops ethnl_tsinfo_request_ops; +extern const struct ethnl_request_ops ethnl_fec_request_ops; +extern const struct ethnl_request_ops ethnl_module_eeprom_request_ops; +extern const struct ethnl_request_ops ethnl_stats_request_ops; +extern const struct ethnl_request_ops ethnl_phc_vclocks_request_ops; +extern const struct ethnl_request_ops ethnl_module_request_ops; +extern const struct ethnl_request_ops ethnl_pse_request_ops; +extern const struct ethnl_request_ops ethnl_rss_request_ops; +extern const struct ethnl_request_ops ethnl_plca_cfg_request_ops; +extern const struct ethnl_request_ops ethnl_plca_status_request_ops; +extern const struct ethnl_request_ops ethnl_mm_request_ops; +extern const struct ethnl_request_ops ethnl_phy_request_ops; +extern const struct ethnl_request_ops ethnl_tsconfig_request_ops; +extern const struct ethnl_request_ops ethnl_mse_request_ops; + +extern const struct nla_policy ethnl_header_policy[ETHTOOL_A_HEADER_FLAGS + 1]; +extern const struct nla_policy ethnl_header_policy_stats[ETHTOOL_A_HEADER_FLAGS + 1]; +extern const 
struct nla_policy ethnl_header_policy_phy[ETHTOOL_A_HEADER_PHY_INDEX + 1]; +extern const struct nla_policy ethnl_header_policy_phy_stats[ETHTOOL_A_HEADER_PHY_INDEX + 1]; +extern const struct nla_policy ethnl_strset_get_policy[ETHTOOL_A_STRSET_COUNTS_ONLY + 1]; +extern const struct nla_policy ethnl_linkinfo_get_policy[ETHTOOL_A_LINKINFO_HEADER + 1]; +extern const struct nla_policy ethnl_linkinfo_set_policy[ETHTOOL_A_LINKINFO_TP_MDIX_CTRL + 1]; +extern const struct nla_policy ethnl_linkmodes_get_policy[ETHTOOL_A_LINKMODES_HEADER + 1]; +extern const struct nla_policy ethnl_linkmodes_set_policy[ETHTOOL_A_LINKMODES_LANES + 1]; +extern const struct nla_policy ethnl_linkstate_get_policy[ETHTOOL_A_LINKSTATE_HEADER + 1]; +extern const struct nla_policy ethnl_debug_get_policy[ETHTOOL_A_DEBUG_HEADER + 1]; +extern const struct nla_policy ethnl_debug_set_policy[ETHTOOL_A_DEBUG_MSGMASK + 1]; +extern const struct nla_policy ethnl_wol_get_policy[ETHTOOL_A_WOL_HEADER + 1]; +extern const struct nla_policy ethnl_wol_set_policy[ETHTOOL_A_WOL_SOPASS + 1]; +extern const struct nla_policy ethnl_features_get_policy[ETHTOOL_A_FEATURES_HEADER + 1]; +extern const struct nla_policy ethnl_features_set_policy[ETHTOOL_A_FEATURES_WANTED + 1]; +extern const struct nla_policy ethnl_privflags_get_policy[ETHTOOL_A_PRIVFLAGS_HEADER + 1]; +extern const struct nla_policy ethnl_privflags_set_policy[ETHTOOL_A_PRIVFLAGS_FLAGS + 1]; +extern const struct nla_policy ethnl_rings_get_policy[ETHTOOL_A_RINGS_HEADER + 1]; +extern const struct nla_policy ethnl_rings_set_policy[ETHTOOL_A_RINGS_HDS_THRESH_MAX + 1]; +extern const struct nla_policy ethnl_channels_get_policy[ETHTOOL_A_CHANNELS_HEADER + 1]; +extern const struct nla_policy ethnl_channels_set_policy[ETHTOOL_A_CHANNELS_COMBINED_COUNT + 1]; +extern const struct nla_policy ethnl_coalesce_get_policy[ETHTOOL_A_COALESCE_HEADER + 1]; +extern const struct nla_policy ethnl_coalesce_set_policy[ETHTOOL_A_COALESCE_MAX + 1]; +extern const struct nla_policy ethnl_pause_get_policy[ETHTOOL_A_PAUSE_STATS_SRC + 1]; +extern const struct nla_policy ethnl_pause_set_policy[ETHTOOL_A_PAUSE_STATS_SRC + 1]; +extern const struct nla_policy ethnl_eee_get_policy[ETHTOOL_A_EEE_HEADER + 1]; +extern const struct nla_policy ethnl_eee_set_policy[ETHTOOL_A_EEE_TX_LPI_TIMER + 1]; +extern const struct nla_policy ethnl_tsinfo_get_policy[ETHTOOL_A_TSINFO_MAX + 1]; +extern const struct nla_policy ethnl_cable_test_act_policy[ETHTOOL_A_CABLE_TEST_HEADER + 1]; +extern const struct nla_policy ethnl_cable_test_tdr_act_policy[ETHTOOL_A_CABLE_TEST_TDR_CFG + 1]; +extern const struct nla_policy ethnl_tunnel_info_get_policy[ETHTOOL_A_TUNNEL_INFO_HEADER + 1]; +extern const struct nla_policy ethnl_fec_get_policy[ETHTOOL_A_FEC_HEADER + 1]; +extern const struct nla_policy ethnl_fec_set_policy[ETHTOOL_A_FEC_AUTO + 1]; +extern const struct nla_policy ethnl_module_eeprom_get_policy[ETHTOOL_A_MODULE_EEPROM_I2C_ADDRESS + 1]; +extern const struct nla_policy ethnl_stats_get_policy[ETHTOOL_A_STATS_SRC + 1]; +extern const struct nla_policy ethnl_phc_vclocks_get_policy[ETHTOOL_A_PHC_VCLOCKS_HEADER + 1]; +extern const struct nla_policy ethnl_module_get_policy[ETHTOOL_A_MODULE_HEADER + 1]; +extern const struct nla_policy ethnl_module_set_policy[ETHTOOL_A_MODULE_POWER_MODE_POLICY + 1]; +extern const struct nla_policy ethnl_pse_get_policy[ETHTOOL_A_PSE_HEADER + 1]; +extern const struct nla_policy ethnl_pse_set_policy[ETHTOOL_A_PSE_MAX + 1]; +extern const struct nla_policy ethnl_rss_get_policy[ETHTOOL_A_RSS_START_CONTEXT + 1]; +extern const struct 
nla_policy ethnl_rss_set_policy[ETHTOOL_A_RSS_FLOW_HASH + 1]; +extern const struct nla_policy ethnl_rss_create_policy[ETHTOOL_A_RSS_INPUT_XFRM + 1]; +extern const struct nla_policy ethnl_rss_delete_policy[ETHTOOL_A_RSS_CONTEXT + 1]; +extern const struct nla_policy ethnl_plca_get_cfg_policy[ETHTOOL_A_PLCA_HEADER + 1]; +extern const struct nla_policy ethnl_plca_set_cfg_policy[ETHTOOL_A_PLCA_MAX + 1]; +extern const struct nla_policy ethnl_plca_get_status_policy[ETHTOOL_A_PLCA_HEADER + 1]; +extern const struct nla_policy ethnl_mm_get_policy[ETHTOOL_A_MM_HEADER + 1]; +extern const struct nla_policy ethnl_mm_set_policy[ETHTOOL_A_MM_MAX + 1]; +extern const struct nla_policy ethnl_module_fw_flash_act_policy[ETHTOOL_A_MODULE_FW_FLASH_PASSWORD + 1]; +extern const struct nla_policy ethnl_phy_get_policy[ETHTOOL_A_PHY_HEADER + 1]; +extern const struct nla_policy ethnl_tsconfig_get_policy[ETHTOOL_A_TSCONFIG_HEADER + 1]; +extern const struct nla_policy ethnl_tsconfig_set_policy[ETHTOOL_A_TSCONFIG_MAX + 1]; +extern const struct nla_policy ethnl_mse_get_policy[ETHTOOL_A_MSE_HEADER + 1]; + +int ethnl_set_features(struct sk_buff *skb, struct genl_info *info); +int ethnl_act_cable_test(struct sk_buff *skb, struct genl_info *info); +int ethnl_act_cable_test_tdr(struct sk_buff *skb, struct genl_info *info); +int ethnl_tunnel_info_doit(struct sk_buff *skb, struct genl_info *info); +int ethnl_tunnel_info_start(struct netlink_callback *cb); +int ethnl_tunnel_info_dumpit(struct sk_buff *skb, struct netlink_callback *cb); +int ethnl_act_module_fw_flash(struct sk_buff *skb, struct genl_info *info); +int ethnl_rss_dump_start(struct netlink_callback *cb); +int ethnl_rss_dumpit(struct sk_buff *skb, struct netlink_callback *cb); +int ethnl_tsinfo_start(struct netlink_callback *cb); +int ethnl_tsinfo_dumpit(struct sk_buff *skb, struct netlink_callback *cb); +int ethnl_tsinfo_done(struct netlink_callback *cb); +int ethnl_rss_create_doit(struct sk_buff *skb, struct genl_info *info); +int ethnl_rss_delete_doit(struct sk_buff *skb, struct genl_info *info); + +extern const char stats_std_names[__ETHTOOL_STATS_CNT][ETH_GSTRING_LEN]; +extern const char stats_eth_phy_names[__ETHTOOL_A_STATS_ETH_PHY_CNT][ETH_GSTRING_LEN]; +extern const char stats_eth_mac_names[__ETHTOOL_A_STATS_ETH_MAC_CNT][ETH_GSTRING_LEN]; +extern const char stats_eth_ctrl_names[__ETHTOOL_A_STATS_ETH_CTRL_CNT][ETH_GSTRING_LEN]; +extern const char stats_rmon_names[__ETHTOOL_A_STATS_RMON_CNT][ETH_GSTRING_LEN]; +extern const char stats_phy_names[__ETHTOOL_A_STATS_PHY_CNT][ETH_GSTRING_LEN]; + +#endif /* _NET_ETHTOOL_NETLINK_H */ diff --git a/net/ethtool/pause.c b/net/ethtool/pause.c new file mode 100644 index 000000000000..0f9af1e66548 --- /dev/null +++ b/net/ethtool/pause.c @@ -0,0 +1,219 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include "netlink.h" +#include "common.h" + +struct pause_req_info { + struct ethnl_req_info base; + enum ethtool_mac_stats_src src; +}; + +#define PAUSE_REQINFO(__req_base) \ + container_of(__req_base, struct pause_req_info, base) + +struct pause_reply_data { + struct ethnl_reply_data base; + struct ethtool_pauseparam pauseparam; + struct ethtool_pause_stats pausestat; +}; + +#define PAUSE_REPDATA(__reply_base) \ + container_of(__reply_base, struct pause_reply_data, base) + +const struct nla_policy ethnl_pause_get_policy[] = { + [ETHTOOL_A_PAUSE_HEADER] = + NLA_POLICY_NESTED(ethnl_header_policy_stats), + [ETHTOOL_A_PAUSE_STATS_SRC] = + NLA_POLICY_MAX(NLA_U32, ETHTOOL_MAC_STATS_SRC_PMAC), +}; + +static int pause_parse_request(struct 
ethnl_req_info *req_base, + struct nlattr **tb, + struct netlink_ext_ack *extack) +{ + enum ethtool_mac_stats_src src = ETHTOOL_MAC_STATS_SRC_AGGREGATE; + struct pause_req_info *req_info = PAUSE_REQINFO(req_base); + + if (tb[ETHTOOL_A_PAUSE_STATS_SRC]) { + if (!(req_base->flags & ETHTOOL_FLAG_STATS)) { + NL_SET_ERR_MSG_MOD(extack, + "ETHTOOL_FLAG_STATS must be set when requesting a source of stats"); + return -EINVAL; + } + + src = nla_get_u32(tb[ETHTOOL_A_PAUSE_STATS_SRC]); + } + + req_info->src = src; + + return 0; +} + +static int pause_prepare_data(const struct ethnl_req_info *req_base, + struct ethnl_reply_data *reply_base, + const struct genl_info *info) +{ + const struct pause_req_info *req_info = PAUSE_REQINFO(req_base); + struct pause_reply_data *data = PAUSE_REPDATA(reply_base); + enum ethtool_mac_stats_src src = req_info->src; + struct net_device *dev = reply_base->dev; + int ret; + + if (!dev->ethtool_ops->get_pauseparam) + return -EOPNOTSUPP; + + ethtool_stats_init((u64 *)&data->pausestat, + sizeof(data->pausestat) / 8); + data->pausestat.src = src; + + ret = ethnl_ops_begin(dev); + if (ret < 0) + return ret; + + if ((src == ETHTOOL_MAC_STATS_SRC_EMAC || + src == ETHTOOL_MAC_STATS_SRC_PMAC) && + !__ethtool_dev_mm_supported(dev)) { + NL_SET_ERR_MSG_MOD(info->extack, + "Device does not support MAC merge layer"); + ethnl_ops_complete(dev); + return -EOPNOTSUPP; + } + + dev->ethtool_ops->get_pauseparam(dev, &data->pauseparam); + if (req_base->flags & ETHTOOL_FLAG_STATS && + dev->ethtool_ops->get_pause_stats) + dev->ethtool_ops->get_pause_stats(dev, &data->pausestat); + + ethnl_ops_complete(dev); + + return 0; +} + +static int pause_reply_size(const struct ethnl_req_info *req_base, + const struct ethnl_reply_data *reply_base) +{ + int n = nla_total_size(sizeof(u8)) + /* _PAUSE_AUTONEG */ + nla_total_size(sizeof(u8)) + /* _PAUSE_RX */ + nla_total_size(sizeof(u8)); /* _PAUSE_TX */ + + if (req_base->flags & ETHTOOL_FLAG_STATS) + n += nla_total_size(0) + /* _PAUSE_STATS */ + nla_total_size(sizeof(u32)) + /* _PAUSE_STATS_SRC */ + nla_total_size_64bit(sizeof(u64)) * ETHTOOL_PAUSE_STAT_CNT; + return n; +} + +static int ethtool_put_stat(struct sk_buff *skb, u64 val, u16 attrtype, + u16 padtype) +{ + if (val == ETHTOOL_STAT_NOT_SET) + return 0; + if (nla_put_u64_64bit(skb, attrtype, val, padtype)) + return -EMSGSIZE; + + return 0; +} + +static int pause_put_stats(struct sk_buff *skb, + const struct ethtool_pause_stats *pause_stats) +{ + const u16 pad = ETHTOOL_A_PAUSE_STAT_PAD; + struct nlattr *nest; + + if (nla_put_u32(skb, ETHTOOL_A_PAUSE_STATS_SRC, pause_stats->src)) + return -EMSGSIZE; + + nest = nla_nest_start(skb, ETHTOOL_A_PAUSE_STATS); + if (!nest) + return -EMSGSIZE; + + if (ethtool_put_stat(skb, pause_stats->tx_pause_frames, + ETHTOOL_A_PAUSE_STAT_TX_FRAMES, pad) || + ethtool_put_stat(skb, pause_stats->rx_pause_frames, + ETHTOOL_A_PAUSE_STAT_RX_FRAMES, pad)) + goto err_cancel; + + nla_nest_end(skb, nest); + return 0; + +err_cancel: + nla_nest_cancel(skb, nest); + return -EMSGSIZE; +} + +static int pause_fill_reply(struct sk_buff *skb, + const struct ethnl_req_info *req_base, + const struct ethnl_reply_data *reply_base) +{ + const struct pause_reply_data *data = PAUSE_REPDATA(reply_base); + const struct ethtool_pauseparam *pauseparam = &data->pauseparam; + + if (nla_put_u8(skb, ETHTOOL_A_PAUSE_AUTONEG, !!pauseparam->autoneg) || + nla_put_u8(skb, ETHTOOL_A_PAUSE_RX, !!pauseparam->rx_pause) || + nla_put_u8(skb, ETHTOOL_A_PAUSE_TX, !!pauseparam->tx_pause)) + return -EMSGSIZE; + + if 
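+	/* Driver-side view of the statistics emitted above, as a rough
+	 * sketch: pause_prepare_data() pre-fills the structure with
+	 * ETHTOOL_STAT_NOT_SET via ethtool_stats_init(), so a driver only
+	 * fills in the counters its hardware actually has and
+	 * ethtool_put_stat() silently skips the rest. All foo_* names are
+	 * hypothetical:
+	 *
+	 *	static void foo_get_pause_stats(struct net_device *dev,
+	 *					struct ethtool_pause_stats *stats)
+	 *	{
+	 *		struct foo_priv *priv = netdev_priv(dev);
+	 *
+	 *		stats->tx_pause_frames = foo_read_cnt(priv, FOO_TX_PAUSE);
+	 *		stats->rx_pause_frames = foo_read_cnt(priv, FOO_RX_PAUSE);
+	 *	}
+	 */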
(req_base->flags & ETHTOOL_FLAG_STATS && + pause_put_stats(skb, &data->pausestat)) + return -EMSGSIZE; + + return 0; +} + +/* PAUSE_SET */ + +const struct nla_policy ethnl_pause_set_policy[] = { + [ETHTOOL_A_PAUSE_HEADER] = + NLA_POLICY_NESTED(ethnl_header_policy), + [ETHTOOL_A_PAUSE_AUTONEG] = { .type = NLA_U8 }, + [ETHTOOL_A_PAUSE_RX] = { .type = NLA_U8 }, + [ETHTOOL_A_PAUSE_TX] = { .type = NLA_U8 }, + [ETHTOOL_A_PAUSE_STATS_SRC] = { .type = NLA_REJECT }, +}; + +static int +ethnl_set_pause_validate(struct ethnl_req_info *req_info, + struct genl_info *info) +{ + const struct ethtool_ops *ops = req_info->dev->ethtool_ops; + + return ops->get_pauseparam && ops->set_pauseparam ? 1 : -EOPNOTSUPP; +} + +static int +ethnl_set_pause(struct ethnl_req_info *req_info, struct genl_info *info) +{ + struct net_device *dev = req_info->dev; + struct ethtool_pauseparam params = {}; + struct nlattr **tb = info->attrs; + bool mod = false; + int ret; + + dev->ethtool_ops->get_pauseparam(dev, &params); + + ethnl_update_bool32(&params.autoneg, tb[ETHTOOL_A_PAUSE_AUTONEG], &mod); + ethnl_update_bool32(&params.rx_pause, tb[ETHTOOL_A_PAUSE_RX], &mod); + ethnl_update_bool32(&params.tx_pause, tb[ETHTOOL_A_PAUSE_TX], &mod); + if (!mod) + return 0; + + ret = dev->ethtool_ops->set_pauseparam(dev, &params); + return ret < 0 ? ret : 1; +} + +const struct ethnl_request_ops ethnl_pause_request_ops = { + .request_cmd = ETHTOOL_MSG_PAUSE_GET, + .reply_cmd = ETHTOOL_MSG_PAUSE_GET_REPLY, + .hdr_attr = ETHTOOL_A_PAUSE_HEADER, + .req_info_size = sizeof(struct pause_req_info), + .reply_data_size = sizeof(struct pause_reply_data), + + .parse_request = pause_parse_request, + .prepare_data = pause_prepare_data, + .reply_size = pause_reply_size, + .fill_reply = pause_fill_reply, + + .set_validate = ethnl_set_pause_validate, + .set = ethnl_set_pause, + .set_ntf_cmd = ETHTOOL_MSG_PAUSE_NTF, +}; diff --git a/net/ethtool/phc_vclocks.c b/net/ethtool/phc_vclocks.c new file mode 100644 index 000000000000..cadaabed60bd --- /dev/null +++ b/net/ethtool/phc_vclocks.c @@ -0,0 +1,94 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright 2021 NXP + */ +#include "netlink.h" +#include "common.h" + +struct phc_vclocks_req_info { + struct ethnl_req_info base; +}; + +struct phc_vclocks_reply_data { + struct ethnl_reply_data base; + int num; + int *index; +}; + +#define PHC_VCLOCKS_REPDATA(__reply_base) \ + container_of(__reply_base, struct phc_vclocks_reply_data, base) + +const struct nla_policy ethnl_phc_vclocks_get_policy[] = { + [ETHTOOL_A_PHC_VCLOCKS_HEADER] = NLA_POLICY_NESTED(ethnl_header_policy), +}; + +static int phc_vclocks_prepare_data(const struct ethnl_req_info *req_base, + struct ethnl_reply_data *reply_base, + const struct genl_info *info) +{ + struct phc_vclocks_reply_data *data = PHC_VCLOCKS_REPDATA(reply_base); + struct net_device *dev = reply_base->dev; + int ret; + + ret = ethnl_ops_begin(dev); + if (ret < 0) + return ret; + data->num = ethtool_get_phc_vclocks(dev, &data->index); + ethnl_ops_complete(dev); + + return ret; +} + +static int phc_vclocks_reply_size(const struct ethnl_req_info *req_base, + const struct ethnl_reply_data *reply_base) +{ + const struct phc_vclocks_reply_data *data = + PHC_VCLOCKS_REPDATA(reply_base); + int len = 0; + + if (data->num > 0) { + len += nla_total_size(sizeof(u32)); + len += nla_total_size(sizeof(s32) * data->num); + } + + return len; +} + +static int phc_vclocks_fill_reply(struct sk_buff *skb, + const struct ethnl_req_info *req_base, + const struct ethnl_reply_data *reply_base) +{ + const struct 
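+	/* What this function emits, seen from the receiving side: a u32
+	 * ETHTOOL_A_PHC_VCLOCKS_NUM and a flat array of s32 PHC indices in
+	 * ETHTOOL_A_PHC_VCLOCKS_INDEX. A purely illustrative decode
+	 * ("index_attr" is hypothetical; nla_len()/nla_data() are used only
+	 * for brevity) might look like:
+	 *
+	 *	int n = nla_len(index_attr) / sizeof(s32);
+	 *	const s32 *idx = nla_data(index_attr);
+	 *	int i;
+	 *
+	 *	for (i = 0; i < n; i++)
+	 *		pr_info("virtual clock /dev/ptp%d\n", idx[i]);
+	 */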
phc_vclocks_reply_data *data = + PHC_VCLOCKS_REPDATA(reply_base); + + if (data->num <= 0) + return 0; + + if (nla_put_u32(skb, ETHTOOL_A_PHC_VCLOCKS_NUM, data->num) || + nla_put(skb, ETHTOOL_A_PHC_VCLOCKS_INDEX, + sizeof(s32) * data->num, data->index)) + return -EMSGSIZE; + + return 0; +} + +static void phc_vclocks_cleanup_data(struct ethnl_reply_data *reply_base) +{ + const struct phc_vclocks_reply_data *data = + PHC_VCLOCKS_REPDATA(reply_base); + + kfree(data->index); +} + +const struct ethnl_request_ops ethnl_phc_vclocks_request_ops = { + .request_cmd = ETHTOOL_MSG_PHC_VCLOCKS_GET, + .reply_cmd = ETHTOOL_MSG_PHC_VCLOCKS_GET_REPLY, + .hdr_attr = ETHTOOL_A_PHC_VCLOCKS_HEADER, + .req_info_size = sizeof(struct phc_vclocks_req_info), + .reply_data_size = sizeof(struct phc_vclocks_reply_data), + + .prepare_data = phc_vclocks_prepare_data, + .reply_size = phc_vclocks_reply_size, + .fill_reply = phc_vclocks_fill_reply, + .cleanup_data = phc_vclocks_cleanup_data, +}; diff --git a/net/ethtool/phy.c b/net/ethtool/phy.c new file mode 100644 index 000000000000..68372bef4b2f --- /dev/null +++ b/net/ethtool/phy.c @@ -0,0 +1,165 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright 2023 Bootlin + * + */ +#include "common.h" +#include "netlink.h" + +#include <linux/phy.h> +#include <linux/phy_link_topology.h> +#include <linux/sfp.h> +#include <net/netdev_lock.h> + +struct phy_req_info { + struct ethnl_req_info base; +}; + +struct phy_reply_data { + struct ethnl_reply_data base; + u32 phyindex; + char *drvname; + char *name; + unsigned int upstream_type; + char *upstream_sfp_name; + unsigned int upstream_index; + char *downstream_sfp_name; +}; + +#define PHY_REPDATA(__reply_base) \ + container_of(__reply_base, struct phy_reply_data, base) + +const struct nla_policy ethnl_phy_get_policy[ETHTOOL_A_PHY_HEADER + 1] = { + [ETHTOOL_A_PHY_HEADER] = NLA_POLICY_NESTED(ethnl_header_policy), +}; + +static int phy_reply_size(const struct ethnl_req_info *req_info, + const struct ethnl_reply_data *reply_data) +{ + struct phy_reply_data *rep_data = PHY_REPDATA(reply_data); + size_t size = 0; + + /* ETHTOOL_A_PHY_INDEX */ + size += nla_total_size(sizeof(u32)); + + /* ETHTOOL_A_DRVNAME */ + if (rep_data->drvname) + size += nla_total_size(strlen(rep_data->drvname) + 1); + + /* ETHTOOL_A_NAME */ + size += nla_total_size(strlen(rep_data->name) + 1); + + /* ETHTOOL_A_PHY_UPSTREAM_TYPE */ + size += nla_total_size(sizeof(u32)); + + /* ETHTOOL_A_PHY_UPSTREAM_SFP_NAME */ + if (rep_data->upstream_sfp_name) + size += nla_total_size(strlen(rep_data->upstream_sfp_name) + 1); + + /* ETHTOOL_A_PHY_UPSTREAM_INDEX */ + if (rep_data->upstream_index) + size += nla_total_size(sizeof(u32)); + + /* ETHTOOL_A_PHY_DOWNSTREAM_SFP_NAME */ + if (rep_data->downstream_sfp_name) + size += nla_total_size(strlen(rep_data->downstream_sfp_name) + 1); + + return size; +} + +static int phy_prepare_data(const struct ethnl_req_info *req_info, + struct ethnl_reply_data *reply_data, + const struct genl_info *info) +{ + struct phy_link_topology *topo = reply_data->dev->link_topo; + struct phy_reply_data *rep_data = PHY_REPDATA(reply_data); + struct nlattr **tb = info->attrs; + struct phy_device_node *pdn; + struct phy_device *phydev; + + /* RTNL is held by the caller */ + phydev = ethnl_req_get_phydev(req_info, tb, ETHTOOL_A_PHY_HEADER, + info->extack); + if (IS_ERR_OR_NULL(phydev)) + return -EOPNOTSUPP; + + pdn = xa_load(&topo->phys, phydev->phyindex); + if (!pdn) + return -EOPNOTSUPP; + + rep_data->phyindex = phydev->phyindex; + rep_data->name = 
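+	/* The xa_load() above treats dev->link_topo->phys as an xarray keyed
+	 * by PHY index. As a rough sketch only (not part of this patch),
+	 * walking every PHY attached to the netdev needs nothing more than
+	 * the same xarray:
+	 *
+	 *	struct phy_device_node *pdn;
+	 *	unsigned long index;
+	 *	unsigned int n_phys = 0;
+	 *
+	 *	xa_for_each(&dev->link_topo->phys, index, pdn)
+	 *		n_phys++;	// e.g. count PHYs behind this netdev
+	 */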
kstrdup(dev_name(&phydev->mdio.dev), GFP_KERNEL); + rep_data->drvname = kstrdup(phydev->drv->name, GFP_KERNEL); + rep_data->upstream_type = pdn->upstream_type; + + if (pdn->upstream_type == PHY_UPSTREAM_PHY) { + struct phy_device *upstream = pdn->upstream.phydev; + rep_data->upstream_index = upstream->phyindex; + } + + if (pdn->parent_sfp_bus) + rep_data->upstream_sfp_name = kstrdup(sfp_get_name(pdn->parent_sfp_bus), + GFP_KERNEL); + + if (phydev->sfp_bus) + rep_data->downstream_sfp_name = kstrdup(sfp_get_name(phydev->sfp_bus), + GFP_KERNEL); + + return 0; +} + +static int phy_fill_reply(struct sk_buff *skb, + const struct ethnl_req_info *req_info, + const struct ethnl_reply_data *reply_data) +{ + struct phy_reply_data *rep_data = PHY_REPDATA(reply_data); + + if (nla_put_u32(skb, ETHTOOL_A_PHY_INDEX, rep_data->phyindex) || + nla_put_string(skb, ETHTOOL_A_PHY_NAME, rep_data->name) || + nla_put_u32(skb, ETHTOOL_A_PHY_UPSTREAM_TYPE, rep_data->upstream_type)) + return -EMSGSIZE; + + if (rep_data->drvname && + nla_put_string(skb, ETHTOOL_A_PHY_DRVNAME, rep_data->drvname)) + return -EMSGSIZE; + + if (rep_data->upstream_index && + nla_put_u32(skb, ETHTOOL_A_PHY_UPSTREAM_INDEX, + rep_data->upstream_index)) + return -EMSGSIZE; + + if (rep_data->upstream_sfp_name && + nla_put_string(skb, ETHTOOL_A_PHY_UPSTREAM_SFP_NAME, + rep_data->upstream_sfp_name)) + return -EMSGSIZE; + + if (rep_data->downstream_sfp_name && + nla_put_string(skb, ETHTOOL_A_PHY_DOWNSTREAM_SFP_NAME, + rep_data->downstream_sfp_name)) + return -EMSGSIZE; + + return 0; +} + +static void phy_cleanup_data(struct ethnl_reply_data *reply_data) +{ + struct phy_reply_data *rep_data = PHY_REPDATA(reply_data); + + kfree(rep_data->drvname); + kfree(rep_data->name); + kfree(rep_data->upstream_sfp_name); + kfree(rep_data->downstream_sfp_name); +} + +const struct ethnl_request_ops ethnl_phy_request_ops = { + .request_cmd = ETHTOOL_MSG_PHY_GET, + .reply_cmd = ETHTOOL_MSG_PHY_GET_REPLY, + .hdr_attr = ETHTOOL_A_PHY_HEADER, + .req_info_size = sizeof(struct phy_req_info), + .reply_data_size = sizeof(struct phy_reply_data), + + .prepare_data = phy_prepare_data, + .reply_size = phy_reply_size, + .fill_reply = phy_fill_reply, + .cleanup_data = phy_cleanup_data, +}; diff --git a/net/ethtool/plca.c b/net/ethtool/plca.c new file mode 100644 index 000000000000..e1f7820a6158 --- /dev/null +++ b/net/ethtool/plca.c @@ -0,0 +1,271 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include <linux/phy.h> +#include <linux/ethtool_netlink.h> + +#include "netlink.h" +#include "common.h" + +struct plca_req_info { + struct ethnl_req_info base; +}; + +struct plca_reply_data { + struct ethnl_reply_data base; + struct phy_plca_cfg plca_cfg; + struct phy_plca_status plca_st; +}; + +// Helpers ------------------------------------------------------------------ // + +#define PLCA_REPDATA(__reply_base) \ + container_of(__reply_base, struct plca_reply_data, base) + +// PLCA get configuration message ------------------------------------------- // + +const struct nla_policy ethnl_plca_get_cfg_policy[] = { + [ETHTOOL_A_PLCA_HEADER] = + NLA_POLICY_NESTED(ethnl_header_policy_phy), +}; + +static void plca_update_sint(int *dst, struct nlattr **tb, u32 attrid, + bool *mod) +{ + const struct nlattr *attr = tb[attrid]; + + if (!attr || + WARN_ON_ONCE(attrid >= ARRAY_SIZE(ethnl_plca_set_cfg_policy))) + return; + + switch (ethnl_plca_set_cfg_policy[attrid].type) { + case NLA_U8: + *dst = nla_get_u8(attr); + break; + case NLA_U32: + *dst = nla_get_u32(attr); + break; + default: + 
WARN_ON_ONCE(1); + } + + *mod = true; +} + +static int plca_get_cfg_prepare_data(const struct ethnl_req_info *req_base, + struct ethnl_reply_data *reply_base, + const struct genl_info *info) +{ + struct plca_reply_data *data = PLCA_REPDATA(reply_base); + struct net_device *dev = reply_base->dev; + const struct ethtool_phy_ops *ops; + struct nlattr **tb = info->attrs; + struct phy_device *phydev; + int ret; + + phydev = ethnl_req_get_phydev(req_base, tb, ETHTOOL_A_PLCA_HEADER, + info->extack); + // check that the PHY device is available and connected + if (IS_ERR_OR_NULL(phydev)) { + ret = -EOPNOTSUPP; + goto out; + } + + // note: rtnl_lock is held already by ethnl_default_doit + ops = ethtool_phy_ops; + if (!ops || !ops->get_plca_cfg) { + ret = -EOPNOTSUPP; + goto out; + } + + ret = ethnl_ops_begin(dev); + if (ret < 0) + goto out; + + memset(&data->plca_cfg, 0xff, + sizeof_field(struct plca_reply_data, plca_cfg)); + + ret = ops->get_plca_cfg(phydev, &data->plca_cfg); + ethnl_ops_complete(dev); + +out: + return ret; +} + +static int plca_get_cfg_reply_size(const struct ethnl_req_info *req_base, + const struct ethnl_reply_data *reply_base) +{ + return nla_total_size(sizeof(u16)) + /* _VERSION */ + nla_total_size(sizeof(u8)) + /* _ENABLED */ + nla_total_size(sizeof(u32)) + /* _NODE_CNT */ + nla_total_size(sizeof(u32)) + /* _NODE_ID */ + nla_total_size(sizeof(u32)) + /* _TO_TIMER */ + nla_total_size(sizeof(u32)) + /* _BURST_COUNT */ + nla_total_size(sizeof(u32)); /* _BURST_TIMER */ +} + +static int plca_get_cfg_fill_reply(struct sk_buff *skb, + const struct ethnl_req_info *req_base, + const struct ethnl_reply_data *reply_base) +{ + const struct plca_reply_data *data = PLCA_REPDATA(reply_base); + const struct phy_plca_cfg *plca = &data->plca_cfg; + + if ((plca->version >= 0 && + nla_put_u16(skb, ETHTOOL_A_PLCA_VERSION, plca->version)) || + (plca->enabled >= 0 && + nla_put_u8(skb, ETHTOOL_A_PLCA_ENABLED, !!plca->enabled)) || + (plca->node_id >= 0 && + nla_put_u32(skb, ETHTOOL_A_PLCA_NODE_ID, plca->node_id)) || + (plca->node_cnt >= 0 && + nla_put_u32(skb, ETHTOOL_A_PLCA_NODE_CNT, plca->node_cnt)) || + (plca->to_tmr >= 0 && + nla_put_u32(skb, ETHTOOL_A_PLCA_TO_TMR, plca->to_tmr)) || + (plca->burst_cnt >= 0 && + nla_put_u32(skb, ETHTOOL_A_PLCA_BURST_CNT, plca->burst_cnt)) || + (plca->burst_tmr >= 0 && + nla_put_u32(skb, ETHTOOL_A_PLCA_BURST_TMR, plca->burst_tmr))) + return -EMSGSIZE; + + return 0; +}; + +// PLCA set configuration message ------------------------------------------- // + +const struct nla_policy ethnl_plca_set_cfg_policy[] = { + [ETHTOOL_A_PLCA_HEADER] = + NLA_POLICY_NESTED(ethnl_header_policy_phy), + [ETHTOOL_A_PLCA_ENABLED] = NLA_POLICY_MAX(NLA_U8, 1), + [ETHTOOL_A_PLCA_NODE_ID] = NLA_POLICY_MAX(NLA_U32, 255), + [ETHTOOL_A_PLCA_NODE_CNT] = NLA_POLICY_RANGE(NLA_U32, 1, 255), + [ETHTOOL_A_PLCA_TO_TMR] = NLA_POLICY_MAX(NLA_U32, 255), + [ETHTOOL_A_PLCA_BURST_CNT] = NLA_POLICY_MAX(NLA_U32, 255), + [ETHTOOL_A_PLCA_BURST_TMR] = NLA_POLICY_MAX(NLA_U32, 255), +}; + +static int +ethnl_set_plca(struct ethnl_req_info *req_info, struct genl_info *info) +{ + const struct ethtool_phy_ops *ops; + struct nlattr **tb = info->attrs; + struct phy_plca_cfg plca_cfg; + struct phy_device *phydev; + bool mod = false; + int ret; + + phydev = ethnl_req_get_phydev(req_info, tb, ETHTOOL_A_PLCA_HEADER, + info->extack); + // check that the PHY device is available and connected + if (IS_ERR_OR_NULL(phydev)) + return -EOPNOTSUPP; + + ops = ethtool_phy_ops; + if (!ops || !ops->set_plca_cfg) + return -EOPNOTSUPP; + 
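+	/* Everything below relies on -1 meaning "attribute not given": the
+	 * memset() fills phy_plca_cfg with 0xff, and plca_update_sint() only
+	 * overwrites fields whose netlink attribute is present. A PHY-side
+	 * implementation is expected to honour the same convention; a rough
+	 * sketch with hypothetical foo_* helpers:
+	 *
+	 *	static int foo_set_plca_cfg(struct phy_device *phydev,
+	 *				    const struct phy_plca_cfg *cfg,
+	 *				    struct netlink_ext_ack *extack)
+	 *	{
+	 *		if (cfg->node_id >= 0)
+	 *			foo_write(phydev, FOO_PLCA_NODE_ID, cfg->node_id);
+	 *		if (cfg->node_cnt >= 0)
+	 *			foo_write(phydev, FOO_PLCA_NODE_CNT, cfg->node_cnt);
+	 *		if (cfg->enabled >= 0)
+	 *			foo_write(phydev, FOO_PLCA_CTRL, !!cfg->enabled);
+	 *		return 0;	// fields left at -1 stay untouched
+	 *	}
+	 */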
+ memset(&plca_cfg, 0xff, sizeof(plca_cfg)); + plca_update_sint(&plca_cfg.enabled, tb, ETHTOOL_A_PLCA_ENABLED, &mod); + plca_update_sint(&plca_cfg.node_id, tb, ETHTOOL_A_PLCA_NODE_ID, &mod); + plca_update_sint(&plca_cfg.node_cnt, tb, ETHTOOL_A_PLCA_NODE_CNT, &mod); + plca_update_sint(&plca_cfg.to_tmr, tb, ETHTOOL_A_PLCA_TO_TMR, &mod); + plca_update_sint(&plca_cfg.burst_cnt, tb, ETHTOOL_A_PLCA_BURST_CNT, + &mod); + plca_update_sint(&plca_cfg.burst_tmr, tb, ETHTOOL_A_PLCA_BURST_TMR, + &mod); + if (!mod) + return 0; + + ret = ops->set_plca_cfg(phydev, &plca_cfg, info->extack); + return ret < 0 ? ret : 1; +} + +const struct ethnl_request_ops ethnl_plca_cfg_request_ops = { + .request_cmd = ETHTOOL_MSG_PLCA_GET_CFG, + .reply_cmd = ETHTOOL_MSG_PLCA_GET_CFG_REPLY, + .hdr_attr = ETHTOOL_A_PLCA_HEADER, + .req_info_size = sizeof(struct plca_req_info), + .reply_data_size = sizeof(struct plca_reply_data), + + .prepare_data = plca_get_cfg_prepare_data, + .reply_size = plca_get_cfg_reply_size, + .fill_reply = plca_get_cfg_fill_reply, + + .set = ethnl_set_plca, + .set_ntf_cmd = ETHTOOL_MSG_PLCA_NTF, +}; + +// PLCA get status message -------------------------------------------------- // + +const struct nla_policy ethnl_plca_get_status_policy[] = { + [ETHTOOL_A_PLCA_HEADER] = + NLA_POLICY_NESTED(ethnl_header_policy_phy), +}; + +static int plca_get_status_prepare_data(const struct ethnl_req_info *req_base, + struct ethnl_reply_data *reply_base, + const struct genl_info *info) +{ + struct plca_reply_data *data = PLCA_REPDATA(reply_base); + struct net_device *dev = reply_base->dev; + const struct ethtool_phy_ops *ops; + struct nlattr **tb = info->attrs; + struct phy_device *phydev; + int ret; + + phydev = ethnl_req_get_phydev(req_base, tb, ETHTOOL_A_PLCA_HEADER, + info->extack); + // check that the PHY device is available and connected + if (IS_ERR_OR_NULL(phydev)) { + ret = -EOPNOTSUPP; + goto out; + } + + // note: rtnl_lock is held already by ethnl_default_doit + ops = ethtool_phy_ops; + if (!ops || !ops->get_plca_status) { + ret = -EOPNOTSUPP; + goto out; + } + + ret = ethnl_ops_begin(dev); + if (ret < 0) + goto out; + + memset(&data->plca_st, 0xff, + sizeof_field(struct plca_reply_data, plca_st)); + + ret = ops->get_plca_status(phydev, &data->plca_st); + ethnl_ops_complete(dev); +out: + return ret; +} + +static int plca_get_status_reply_size(const struct ethnl_req_info *req_base, + const struct ethnl_reply_data *reply_base) +{ + return nla_total_size(sizeof(u8)); /* _STATUS */ +} + +static int plca_get_status_fill_reply(struct sk_buff *skb, + const struct ethnl_req_info *req_base, + const struct ethnl_reply_data *reply_base) +{ + const struct plca_reply_data *data = PLCA_REPDATA(reply_base); + const u8 status = data->plca_st.pst; + + if (nla_put_u8(skb, ETHTOOL_A_PLCA_STATUS, !!status)) + return -EMSGSIZE; + + return 0; +}; + +const struct ethnl_request_ops ethnl_plca_status_request_ops = { + .request_cmd = ETHTOOL_MSG_PLCA_GET_STATUS, + .reply_cmd = ETHTOOL_MSG_PLCA_GET_STATUS_REPLY, + .hdr_attr = ETHTOOL_A_PLCA_HEADER, + .req_info_size = sizeof(struct plca_req_info), + .reply_data_size = sizeof(struct plca_reply_data), + + .prepare_data = plca_get_status_prepare_data, + .reply_size = plca_get_status_reply_size, + .fill_reply = plca_get_status_fill_reply, +}; diff --git a/net/ethtool/privflags.c b/net/ethtool/privflags.c new file mode 100644 index 000000000000..297be6a13ab9 --- /dev/null +++ b/net/ethtool/privflags.c @@ -0,0 +1,195 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include "netlink.h" 
+#include "common.h" +#include "bitset.h" + +struct privflags_req_info { + struct ethnl_req_info base; +}; + +struct privflags_reply_data { + struct ethnl_reply_data base; + const char (*priv_flag_names)[ETH_GSTRING_LEN]; + unsigned int n_priv_flags; + u32 priv_flags; +}; + +#define PRIVFLAGS_REPDATA(__reply_base) \ + container_of(__reply_base, struct privflags_reply_data, base) + +const struct nla_policy ethnl_privflags_get_policy[] = { + [ETHTOOL_A_PRIVFLAGS_HEADER] = + NLA_POLICY_NESTED(ethnl_header_policy), +}; + +static int ethnl_get_priv_flags_info(struct net_device *dev, + unsigned int *count, + const char (**names)[ETH_GSTRING_LEN]) +{ + const struct ethtool_ops *ops = dev->ethtool_ops; + int nflags; + + nflags = ops->get_sset_count(dev, ETH_SS_PRIV_FLAGS); + if (nflags < 0) + return nflags; + + if (names) { + *names = kcalloc(nflags, ETH_GSTRING_LEN, GFP_KERNEL); + if (!*names) + return -ENOMEM; + ops->get_strings(dev, ETH_SS_PRIV_FLAGS, (u8 *)*names); + } + + /* We can pass more than 32 private flags to userspace via netlink but + * we cannot get more with ethtool_ops::get_priv_flags(). Note that we + * must not adjust nflags before allocating the space for flag names + * as the buffer must be large enough for all flags. + */ + if (WARN_ONCE(nflags > 32, + "device %s reports more than 32 private flags (%d)\n", + netdev_name(dev), nflags)) + nflags = 32; + *count = nflags; + + return 0; +} + +static int privflags_prepare_data(const struct ethnl_req_info *req_base, + struct ethnl_reply_data *reply_base, + const struct genl_info *info) +{ + struct privflags_reply_data *data = PRIVFLAGS_REPDATA(reply_base); + struct net_device *dev = reply_base->dev; + const char (*names)[ETH_GSTRING_LEN]; + const struct ethtool_ops *ops; + unsigned int nflags; + int ret; + + ops = dev->ethtool_ops; + if (!ops->get_priv_flags || !ops->get_sset_count || !ops->get_strings) + return -EOPNOTSUPP; + ret = ethnl_ops_begin(dev); + if (ret < 0) + return ret; + + ret = ethnl_get_priv_flags_info(dev, &nflags, &names); + if (ret < 0) + goto out_ops; + data->priv_flags = ops->get_priv_flags(dev); + data->priv_flag_names = names; + data->n_priv_flags = nflags; + +out_ops: + ethnl_ops_complete(dev); + return ret; +} + +static int privflags_reply_size(const struct ethnl_req_info *req_base, + const struct ethnl_reply_data *reply_base) +{ + const struct privflags_reply_data *data = PRIVFLAGS_REPDATA(reply_base); + bool compact = req_base->flags & ETHTOOL_FLAG_COMPACT_BITSETS; + const u32 all_flags = ~(u32)0 >> (32 - data->n_priv_flags); + + return ethnl_bitset32_size(&data->priv_flags, &all_flags, + data->n_priv_flags, + data->priv_flag_names, compact); +} + +static int privflags_fill_reply(struct sk_buff *skb, + const struct ethnl_req_info *req_base, + const struct ethnl_reply_data *reply_base) +{ + const struct privflags_reply_data *data = PRIVFLAGS_REPDATA(reply_base); + bool compact = req_base->flags & ETHTOOL_FLAG_COMPACT_BITSETS; + const u32 all_flags = ~(u32)0 >> (32 - data->n_priv_flags); + + return ethnl_put_bitset32(skb, ETHTOOL_A_PRIVFLAGS_FLAGS, + &data->priv_flags, &all_flags, + data->n_priv_flags, data->priv_flag_names, + compact); +} + +static void privflags_cleanup_data(struct ethnl_reply_data *reply_data) +{ + struct privflags_reply_data *data = PRIVFLAGS_REPDATA(reply_data); + + kfree(data->priv_flag_names); +} + +/* PRIVFLAGS_SET */ + +const struct nla_policy ethnl_privflags_set_policy[] = { + [ETHTOOL_A_PRIVFLAGS_HEADER] = + NLA_POLICY_NESTED(ethnl_header_policy), + [ETHTOOL_A_PRIVFLAGS_FLAGS] = { 
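+	/* Driver side of what the code above consumes, as a hedged sketch
+	 * (all foo_* names are illustrative only): private flags are just a
+	 * string set plus a u32 bitmap, capped at 32 by the warning in
+	 * ethnl_get_priv_flags_info():
+	 *
+	 *	static const char foo_priv_flags[][ETH_GSTRING_LEN] = {
+	 *		"rx-fast-path",
+	 *		"extra-debug",
+	 *	};
+	 *
+	 *	static int foo_get_sset_count(struct net_device *dev, int sset)
+	 *	{
+	 *		if (sset != ETH_SS_PRIV_FLAGS)
+	 *			return -EOPNOTSUPP;
+	 *		return ARRAY_SIZE(foo_priv_flags);
+	 *	}
+	 *
+	 *	static void foo_get_strings(struct net_device *dev, u32 sset, u8 *data)
+	 *	{
+	 *		if (sset == ETH_SS_PRIV_FLAGS)
+	 *			memcpy(data, foo_priv_flags, sizeof(foo_priv_flags));
+	 *	}
+	 *
+	 *	static u32 foo_get_priv_flags(struct net_device *dev)
+	 *	{
+	 *		struct foo_priv *priv = netdev_priv(dev);
+	 *
+	 *		return priv->priv_flags;
+	 *	}
+	 *
+	 * set_priv_flags() mirrors the getter and receives the updated u32
+	 * bitmap computed by ethnl_update_bitset32() above.
+	 */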
.type = NLA_NESTED }, +}; + +static int +ethnl_set_privflags_validate(struct ethnl_req_info *req_info, + struct genl_info *info) +{ + const struct ethtool_ops *ops = req_info->dev->ethtool_ops; + + if (!info->attrs[ETHTOOL_A_PRIVFLAGS_FLAGS]) + return -EINVAL; + + if (!ops->get_priv_flags || !ops->set_priv_flags || + !ops->get_sset_count || !ops->get_strings) + return -EOPNOTSUPP; + return 1; +} + +static int +ethnl_set_privflags(struct ethnl_req_info *req_info, struct genl_info *info) +{ + const char (*names)[ETH_GSTRING_LEN] = NULL; + struct net_device *dev = req_info->dev; + struct nlattr **tb = info->attrs; + unsigned int nflags; + bool mod = false; + bool compact; + u32 flags; + int ret; + + ret = ethnl_bitset_is_compact(tb[ETHTOOL_A_PRIVFLAGS_FLAGS], &compact); + if (ret < 0) + return ret; + + ret = ethnl_get_priv_flags_info(dev, &nflags, compact ? NULL : &names); + if (ret < 0) + return ret; + flags = dev->ethtool_ops->get_priv_flags(dev); + + ret = ethnl_update_bitset32(&flags, nflags, + tb[ETHTOOL_A_PRIVFLAGS_FLAGS], names, + info->extack, &mod); + if (ret < 0 || !mod) + goto out_free; + ret = dev->ethtool_ops->set_priv_flags(dev, flags); + if (ret < 0) + goto out_free; + ret = 1; + +out_free: + kfree(names); + return ret; +} + +const struct ethnl_request_ops ethnl_privflags_request_ops = { + .request_cmd = ETHTOOL_MSG_PRIVFLAGS_GET, + .reply_cmd = ETHTOOL_MSG_PRIVFLAGS_GET_REPLY, + .hdr_attr = ETHTOOL_A_PRIVFLAGS_HEADER, + .req_info_size = sizeof(struct privflags_req_info), + .reply_data_size = sizeof(struct privflags_reply_data), + + .prepare_data = privflags_prepare_data, + .reply_size = privflags_reply_size, + .fill_reply = privflags_fill_reply, + .cleanup_data = privflags_cleanup_data, + + .set_validate = ethnl_set_privflags_validate, + .set = ethnl_set_privflags, + .set_ntf_cmd = ETHTOOL_MSG_PRIVFLAGS_NTF, +}; diff --git a/net/ethtool/pse-pd.c b/net/ethtool/pse-pd.c new file mode 100644 index 000000000000..24def9c9dd54 --- /dev/null +++ b/net/ethtool/pse-pd.c @@ -0,0 +1,382 @@ +// SPDX-License-Identifier: GPL-2.0-only +// +// ethtool interface for Ethernet PSE (Power Sourcing Equipment) +// and PD (Powered Device) +// +// Copyright (c) 2022 Pengutronix, Oleksij Rempel <kernel@pengutronix.de> +// + +#include "common.h" +#include "linux/pse-pd/pse.h" +#include "netlink.h" +#include <linux/ethtool_netlink.h> +#include <linux/ethtool.h> +#include <linux/export.h> +#include <linux/phy.h> + +struct pse_req_info { + struct ethnl_req_info base; +}; + +struct pse_reply_data { + struct ethnl_reply_data base; + struct ethtool_pse_control_status status; +}; + +#define PSE_REPDATA(__reply_base) \ + container_of(__reply_base, struct pse_reply_data, base) + +/* PSE_GET */ + +const struct nla_policy ethnl_pse_get_policy[ETHTOOL_A_PSE_HEADER + 1] = { + [ETHTOOL_A_PSE_HEADER] = NLA_POLICY_NESTED(ethnl_header_policy_phy), +}; + +static int pse_get_pse_attributes(struct phy_device *phydev, + struct netlink_ext_ack *extack, + struct pse_reply_data *data) +{ + if (!phydev) { + NL_SET_ERR_MSG(extack, "No PHY found"); + return -EOPNOTSUPP; + } + + if (!phydev->psec) { + NL_SET_ERR_MSG(extack, "No PSE is attached"); + return -EOPNOTSUPP; + } + + memset(&data->status, 0, sizeof(data->status)); + + return pse_ethtool_get_status(phydev->psec, extack, &data->status); +} + +static int pse_prepare_data(const struct ethnl_req_info *req_base, + struct ethnl_reply_data *reply_base, + const struct genl_info *info) +{ + struct pse_reply_data *data = PSE_REPDATA(reply_base); + struct net_device *dev = 
reply_base->dev; + struct nlattr **tb = info->attrs; + struct phy_device *phydev; + int ret; + + ret = ethnl_ops_begin(dev); + if (ret < 0) + return ret; + + phydev = ethnl_req_get_phydev(req_base, tb, ETHTOOL_A_PSE_HEADER, + info->extack); + if (IS_ERR(phydev)) + return -ENODEV; + + ret = pse_get_pse_attributes(phydev, info->extack, data); + + ethnl_ops_complete(dev); + + return ret; +} + +static int pse_reply_size(const struct ethnl_req_info *req_base, + const struct ethnl_reply_data *reply_base) +{ + const struct pse_reply_data *data = PSE_REPDATA(reply_base); + const struct ethtool_pse_control_status *st = &data->status; + int len = 0; + + if (st->pw_d_id) + len += nla_total_size(sizeof(u32)); /* _PSE_PW_D_ID */ + if (st->podl_admin_state > 0) + len += nla_total_size(sizeof(u32)); /* _PODL_PSE_ADMIN_STATE */ + if (st->podl_pw_status > 0) + len += nla_total_size(sizeof(u32)); /* _PODL_PSE_PW_D_STATUS */ + if (st->c33_admin_state > 0) + len += nla_total_size(sizeof(u32)); /* _C33_PSE_ADMIN_STATE */ + if (st->c33_pw_status > 0) + len += nla_total_size(sizeof(u32)); /* _C33_PSE_PW_D_STATUS */ + if (st->c33_pw_class > 0) + len += nla_total_size(sizeof(u32)); /* _C33_PSE_PW_CLASS */ + if (st->c33_actual_pw > 0) + len += nla_total_size(sizeof(u32)); /* _C33_PSE_ACTUAL_PW */ + if (st->c33_ext_state_info.c33_pse_ext_state > 0) { + len += nla_total_size(sizeof(u32)); /* _C33_PSE_EXT_STATE */ + if (st->c33_ext_state_info.__c33_pse_ext_substate > 0) + /* _C33_PSE_EXT_SUBSTATE */ + len += nla_total_size(sizeof(u32)); + } + if (st->c33_avail_pw_limit > 0) + /* _C33_AVAIL_PSE_PW_LIMIT */ + len += nla_total_size(sizeof(u32)); + if (st->c33_pw_limit_nb_ranges > 0) + /* _C33_PSE_PW_LIMIT_RANGES */ + len += st->c33_pw_limit_nb_ranges * + (nla_total_size(0) + + nla_total_size(sizeof(u32)) * 2); + if (st->prio_max) + /* _PSE_PRIO_MAX + _PSE_PRIO */ + len += nla_total_size(sizeof(u32)) * 2; + + return len; +} + +static int pse_put_pw_limit_ranges(struct sk_buff *skb, + const struct ethtool_pse_control_status *st) +{ + const struct ethtool_c33_pse_pw_limit_range *pw_limit_ranges; + int i; + + pw_limit_ranges = st->c33_pw_limit_ranges; + for (i = 0; i < st->c33_pw_limit_nb_ranges; i++) { + struct nlattr *nest; + + nest = nla_nest_start(skb, ETHTOOL_A_C33_PSE_PW_LIMIT_RANGES); + if (!nest) + return -EMSGSIZE; + + if (nla_put_u32(skb, ETHTOOL_A_C33_PSE_PW_LIMIT_MIN, + pw_limit_ranges->min) || + nla_put_u32(skb, ETHTOOL_A_C33_PSE_PW_LIMIT_MAX, + pw_limit_ranges->max)) { + nla_nest_cancel(skb, nest); + return -EMSGSIZE; + } + nla_nest_end(skb, nest); + pw_limit_ranges++; + } + + return 0; +} + +static int pse_fill_reply(struct sk_buff *skb, + const struct ethnl_req_info *req_base, + const struct ethnl_reply_data *reply_base) +{ + const struct pse_reply_data *data = PSE_REPDATA(reply_base); + const struct ethtool_pse_control_status *st = &data->status; + + if (st->pw_d_id && + nla_put_u32(skb, ETHTOOL_A_PSE_PW_D_ID, + st->pw_d_id)) + return -EMSGSIZE; + + if (st->podl_admin_state > 0 && + nla_put_u32(skb, ETHTOOL_A_PODL_PSE_ADMIN_STATE, + st->podl_admin_state)) + return -EMSGSIZE; + + if (st->podl_pw_status > 0 && + nla_put_u32(skb, ETHTOOL_A_PODL_PSE_PW_D_STATUS, + st->podl_pw_status)) + return -EMSGSIZE; + + if (st->c33_admin_state > 0 && + nla_put_u32(skb, ETHTOOL_A_C33_PSE_ADMIN_STATE, + st->c33_admin_state)) + return -EMSGSIZE; + + if (st->c33_pw_status > 0 && + nla_put_u32(skb, ETHTOOL_A_C33_PSE_PW_D_STATUS, + st->c33_pw_status)) + return -EMSGSIZE; + + if (st->c33_pw_class > 0 && + nla_put_u32(skb, 
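+	    /* Each optional field here is guarded by the same condition that
+	     * pse_reply_size() used when reserving room for it; keeping the
+	     * two sides in lockstep is what prevents -EMSGSIZE. The pattern
+	     * for a hypothetical extra field "foo_level" would be:
+	     *
+	     *	// in pse_reply_size():
+	     *	if (st->foo_level > 0)
+	     *		len += nla_total_size(sizeof(u32));
+	     *
+	     *	// in pse_fill_reply():
+	     *	if (st->foo_level > 0 &&
+	     *	    nla_put_u32(skb, ETHTOOL_A_PSE_FOO_LEVEL, st->foo_level))
+	     *		return -EMSGSIZE;
+	     */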
ETHTOOL_A_C33_PSE_PW_CLASS, + st->c33_pw_class)) + return -EMSGSIZE; + + if (st->c33_actual_pw > 0 && + nla_put_u32(skb, ETHTOOL_A_C33_PSE_ACTUAL_PW, + st->c33_actual_pw)) + return -EMSGSIZE; + + if (st->c33_ext_state_info.c33_pse_ext_state > 0) { + if (nla_put_u32(skb, ETHTOOL_A_C33_PSE_EXT_STATE, + st->c33_ext_state_info.c33_pse_ext_state)) + return -EMSGSIZE; + + if (st->c33_ext_state_info.__c33_pse_ext_substate > 0 && + nla_put_u32(skb, ETHTOOL_A_C33_PSE_EXT_SUBSTATE, + st->c33_ext_state_info.__c33_pse_ext_substate)) + return -EMSGSIZE; + } + + if (st->c33_avail_pw_limit > 0 && + nla_put_u32(skb, ETHTOOL_A_C33_PSE_AVAIL_PW_LIMIT, + st->c33_avail_pw_limit)) + return -EMSGSIZE; + + if (st->c33_pw_limit_nb_ranges > 0 && + pse_put_pw_limit_ranges(skb, st)) + return -EMSGSIZE; + + if (st->prio_max && + (nla_put_u32(skb, ETHTOOL_A_PSE_PRIO_MAX, st->prio_max) || + nla_put_u32(skb, ETHTOOL_A_PSE_PRIO, st->prio))) + return -EMSGSIZE; + + return 0; +} + +static void pse_cleanup_data(struct ethnl_reply_data *reply_base) +{ + const struct pse_reply_data *data = PSE_REPDATA(reply_base); + + kfree(data->status.c33_pw_limit_ranges); +} + +/* PSE_SET */ + +const struct nla_policy ethnl_pse_set_policy[ETHTOOL_A_PSE_MAX + 1] = { + [ETHTOOL_A_PSE_HEADER] = NLA_POLICY_NESTED(ethnl_header_policy_phy), + [ETHTOOL_A_PODL_PSE_ADMIN_CONTROL] = + NLA_POLICY_RANGE(NLA_U32, ETHTOOL_PODL_PSE_ADMIN_STATE_DISABLED, + ETHTOOL_PODL_PSE_ADMIN_STATE_ENABLED), + [ETHTOOL_A_C33_PSE_ADMIN_CONTROL] = + NLA_POLICY_RANGE(NLA_U32, ETHTOOL_C33_PSE_ADMIN_STATE_DISABLED, + ETHTOOL_C33_PSE_ADMIN_STATE_ENABLED), + [ETHTOOL_A_C33_PSE_AVAIL_PW_LIMIT] = { .type = NLA_U32 }, + [ETHTOOL_A_PSE_PRIO] = { .type = NLA_U32 }, +}; + +static int +ethnl_set_pse_validate(struct phy_device *phydev, struct genl_info *info) +{ + struct nlattr **tb = info->attrs; + + if (IS_ERR_OR_NULL(phydev)) { + NL_SET_ERR_MSG(info->extack, "No PHY is attached"); + return -EOPNOTSUPP; + } + + if (!phydev->psec) { + NL_SET_ERR_MSG(info->extack, "No PSE is attached"); + return -EOPNOTSUPP; + } + + if (tb[ETHTOOL_A_PODL_PSE_ADMIN_CONTROL] && + !pse_has_podl(phydev->psec)) { + NL_SET_ERR_MSG_ATTR(info->extack, + tb[ETHTOOL_A_PODL_PSE_ADMIN_CONTROL], + "setting PoDL PSE admin control not supported"); + return -EOPNOTSUPP; + } + if (tb[ETHTOOL_A_C33_PSE_ADMIN_CONTROL] && + !pse_has_c33(phydev->psec)) { + NL_SET_ERR_MSG_ATTR(info->extack, + tb[ETHTOOL_A_C33_PSE_ADMIN_CONTROL], + "setting C33 PSE admin control not supported"); + return -EOPNOTSUPP; + } + + return 0; +} + +static int +ethnl_set_pse(struct ethnl_req_info *req_info, struct genl_info *info) +{ + struct nlattr **tb = info->attrs; + struct phy_device *phydev; + int ret; + + phydev = ethnl_req_get_phydev(req_info, tb, ETHTOOL_A_PSE_HEADER, + info->extack); + ret = ethnl_set_pse_validate(phydev, info); + if (ret) + return ret; + + if (tb[ETHTOOL_A_PSE_PRIO]) { + unsigned int prio; + + prio = nla_get_u32(tb[ETHTOOL_A_PSE_PRIO]); + ret = pse_ethtool_set_prio(phydev->psec, info->extack, prio); + if (ret) + return ret; + } + + if (tb[ETHTOOL_A_C33_PSE_AVAIL_PW_LIMIT]) { + unsigned int pw_limit; + + pw_limit = nla_get_u32(tb[ETHTOOL_A_C33_PSE_AVAIL_PW_LIMIT]); + ret = pse_ethtool_set_pw_limit(phydev->psec, info->extack, + pw_limit); + if (ret) + return ret; + } + + /* These values are already validated by the ethnl_pse_set_policy */ + if (tb[ETHTOOL_A_PODL_PSE_ADMIN_CONTROL] || + tb[ETHTOOL_A_C33_PSE_ADMIN_CONTROL]) { + struct pse_control_config config = {}; + + if (tb[ETHTOOL_A_PODL_PSE_ADMIN_CONTROL]) + 
config.podl_admin_control = nla_get_u32(tb[ETHTOOL_A_PODL_PSE_ADMIN_CONTROL]); + if (tb[ETHTOOL_A_C33_PSE_ADMIN_CONTROL]) + config.c33_admin_control = nla_get_u32(tb[ETHTOOL_A_C33_PSE_ADMIN_CONTROL]); + + /* pse_ethtool_set_config() will do nothing if the config + * is zero + */ + ret = pse_ethtool_set_config(phydev->psec, info->extack, + &config); + if (ret) + return ret; + } + + /* Return errno or zero - PSE has no notification */ + return ret; +} + +const struct ethnl_request_ops ethnl_pse_request_ops = { + .request_cmd = ETHTOOL_MSG_PSE_GET, + .reply_cmd = ETHTOOL_MSG_PSE_GET_REPLY, + .hdr_attr = ETHTOOL_A_PSE_HEADER, + .req_info_size = sizeof(struct pse_req_info), + .reply_data_size = sizeof(struct pse_reply_data), + + .prepare_data = pse_prepare_data, + .reply_size = pse_reply_size, + .fill_reply = pse_fill_reply, + .cleanup_data = pse_cleanup_data, + + .set = ethnl_set_pse, + /* PSE has no notification */ +}; + +void ethnl_pse_send_ntf(struct net_device *netdev, unsigned long notifs) +{ + void *reply_payload; + struct sk_buff *skb; + int reply_len; + int ret; + + ASSERT_RTNL(); + + if (!netdev || !notifs) + return; + + reply_len = ethnl_reply_header_size() + + nla_total_size(sizeof(u32)); /* _PSE_NTF_EVENTS */ + + skb = genlmsg_new(reply_len, GFP_KERNEL); + if (!skb) + return; + + reply_payload = ethnl_bcastmsg_put(skb, ETHTOOL_MSG_PSE_NTF); + if (!reply_payload) + goto err_skb; + + ret = ethnl_fill_reply_header(skb, netdev, ETHTOOL_A_PSE_NTF_HEADER); + if (ret < 0) + goto err_skb; + + if (nla_put_uint(skb, ETHTOOL_A_PSE_NTF_EVENTS, notifs)) + goto err_skb; + + genlmsg_end(skb, reply_payload); + ethnl_multicast(skb, netdev); + return; + +err_skb: + nlmsg_free(skb); +} +EXPORT_SYMBOL_GPL(ethnl_pse_send_ntf); diff --git a/net/ethtool/rings.c b/net/ethtool/rings.c new file mode 100644 index 000000000000..aeedd5ec6b8c --- /dev/null +++ b/net/ethtool/rings.c @@ -0,0 +1,322 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include <net/netdev_queues.h> + +#include "netlink.h" +#include "common.h" + +struct rings_req_info { + struct ethnl_req_info base; +}; + +struct rings_reply_data { + struct ethnl_reply_data base; + struct ethtool_ringparam ringparam; + struct kernel_ethtool_ringparam kernel_ringparam; + u32 supported_ring_params; +}; + +#define RINGS_REPDATA(__reply_base) \ + container_of(__reply_base, struct rings_reply_data, base) + +const struct nla_policy ethnl_rings_get_policy[] = { + [ETHTOOL_A_RINGS_HEADER] = + NLA_POLICY_NESTED(ethnl_header_policy), +}; + +static int rings_prepare_data(const struct ethnl_req_info *req_base, + struct ethnl_reply_data *reply_base, + const struct genl_info *info) +{ + struct rings_reply_data *data = RINGS_REPDATA(reply_base); + struct net_device *dev = reply_base->dev; + int ret; + + if (!dev->ethtool_ops->get_ringparam) + return -EOPNOTSUPP; + + data->supported_ring_params = dev->ethtool_ops->supported_ring_params; + ret = ethnl_ops_begin(dev); + if (ret < 0) + return ret; + + data->kernel_ringparam.tcp_data_split = dev->cfg->hds_config; + data->kernel_ringparam.hds_thresh = dev->cfg->hds_thresh; + + dev->ethtool_ops->get_ringparam(dev, &data->ringparam, + &data->kernel_ringparam, info->extack); + ethnl_ops_complete(dev); + + return 0; +} + +static int rings_reply_size(const struct ethnl_req_info *req_base, + const struct ethnl_reply_data *reply_base) +{ + return nla_total_size(sizeof(u32)) + /* _RINGS_RX_MAX */ + nla_total_size(sizeof(u32)) + /* _RINGS_RX_MINI_MAX */ + nla_total_size(sizeof(u32)) + /* _RINGS_RX_JUMBO_MAX */ + 
nla_total_size(sizeof(u32)) + /* _RINGS_TX_MAX */ + nla_total_size(sizeof(u32)) + /* _RINGS_RX */ + nla_total_size(sizeof(u32)) + /* _RINGS_RX_MINI */ + nla_total_size(sizeof(u32)) + /* _RINGS_RX_JUMBO */ + nla_total_size(sizeof(u32)) + /* _RINGS_TX */ + nla_total_size(sizeof(u32)) + /* _RINGS_RX_BUF_LEN */ + nla_total_size(sizeof(u8)) + /* _RINGS_TCP_DATA_SPLIT */ + nla_total_size(sizeof(u32) + /* _RINGS_CQE_SIZE */ + nla_total_size(sizeof(u8)) + /* _RINGS_TX_PUSH */ + nla_total_size(sizeof(u8))) + /* _RINGS_RX_PUSH */ + nla_total_size(sizeof(u32)) + /* _RINGS_TX_PUSH_BUF_LEN */ + nla_total_size(sizeof(u32)) + /* _RINGS_TX_PUSH_BUF_LEN_MAX */ + nla_total_size(sizeof(u32)) + /* _RINGS_HDS_THRESH */ + nla_total_size(sizeof(u32)); /* _RINGS_HDS_THRESH_MAX*/ +} + +static int rings_fill_reply(struct sk_buff *skb, + const struct ethnl_req_info *req_base, + const struct ethnl_reply_data *reply_base) +{ + const struct rings_reply_data *data = RINGS_REPDATA(reply_base); + const struct kernel_ethtool_ringparam *kr = &data->kernel_ringparam; + const struct ethtool_ringparam *ringparam = &data->ringparam; + u32 supported_ring_params = data->supported_ring_params; + + WARN_ON(kr->tcp_data_split > ETHTOOL_TCP_DATA_SPLIT_ENABLED); + + if ((ringparam->rx_max_pending && + (nla_put_u32(skb, ETHTOOL_A_RINGS_RX_MAX, + ringparam->rx_max_pending) || + nla_put_u32(skb, ETHTOOL_A_RINGS_RX, + ringparam->rx_pending))) || + (ringparam->rx_mini_max_pending && + (nla_put_u32(skb, ETHTOOL_A_RINGS_RX_MINI_MAX, + ringparam->rx_mini_max_pending) || + nla_put_u32(skb, ETHTOOL_A_RINGS_RX_MINI, + ringparam->rx_mini_pending))) || + (ringparam->rx_jumbo_max_pending && + (nla_put_u32(skb, ETHTOOL_A_RINGS_RX_JUMBO_MAX, + ringparam->rx_jumbo_max_pending) || + nla_put_u32(skb, ETHTOOL_A_RINGS_RX_JUMBO, + ringparam->rx_jumbo_pending))) || + (ringparam->tx_max_pending && + (nla_put_u32(skb, ETHTOOL_A_RINGS_TX_MAX, + ringparam->tx_max_pending) || + nla_put_u32(skb, ETHTOOL_A_RINGS_TX, + ringparam->tx_pending))) || + (kr->rx_buf_len && + (nla_put_u32(skb, ETHTOOL_A_RINGS_RX_BUF_LEN, kr->rx_buf_len))) || + (kr->tcp_data_split && + (nla_put_u8(skb, ETHTOOL_A_RINGS_TCP_DATA_SPLIT, + kr->tcp_data_split))) || + (kr->cqe_size && + (nla_put_u32(skb, ETHTOOL_A_RINGS_CQE_SIZE, kr->cqe_size))) || + nla_put_u8(skb, ETHTOOL_A_RINGS_TX_PUSH, !!kr->tx_push) || + nla_put_u8(skb, ETHTOOL_A_RINGS_RX_PUSH, !!kr->rx_push) || + ((supported_ring_params & ETHTOOL_RING_USE_TX_PUSH_BUF_LEN) && + (nla_put_u32(skb, ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN_MAX, + kr->tx_push_buf_max_len) || + nla_put_u32(skb, ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN, + kr->tx_push_buf_len))) || + ((supported_ring_params & ETHTOOL_RING_USE_HDS_THRS) && + (nla_put_u32(skb, ETHTOOL_A_RINGS_HDS_THRESH, + kr->hds_thresh) || + nla_put_u32(skb, ETHTOOL_A_RINGS_HDS_THRESH_MAX, + kr->hds_thresh_max)))) + return -EMSGSIZE; + + return 0; +} + +/* RINGS_SET */ + +const struct nla_policy ethnl_rings_set_policy[] = { + [ETHTOOL_A_RINGS_HEADER] = + NLA_POLICY_NESTED(ethnl_header_policy), + [ETHTOOL_A_RINGS_RX] = { .type = NLA_U32 }, + [ETHTOOL_A_RINGS_RX_MINI] = { .type = NLA_U32 }, + [ETHTOOL_A_RINGS_RX_JUMBO] = { .type = NLA_U32 }, + [ETHTOOL_A_RINGS_TX] = { .type = NLA_U32 }, + [ETHTOOL_A_RINGS_RX_BUF_LEN] = NLA_POLICY_MIN(NLA_U32, 1), + [ETHTOOL_A_RINGS_TCP_DATA_SPLIT] = + NLA_POLICY_MAX(NLA_U8, ETHTOOL_TCP_DATA_SPLIT_ENABLED), + [ETHTOOL_A_RINGS_CQE_SIZE] = NLA_POLICY_MIN(NLA_U32, 1), + [ETHTOOL_A_RINGS_TX_PUSH] = NLA_POLICY_MAX(NLA_U8, 1), + [ETHTOOL_A_RINGS_RX_PUSH] = NLA_POLICY_MAX(NLA_U8, 1), + 
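+	/* For reference, the driver-facing half of these attributes: the core
+	 * calls get_ringparam()/set_ringparam() with both the legacy
+	 * ethtool_ringparam and the extended kernel_ethtool_ringparam, and a
+	 * driver may only fill or accept extensions it advertises in
+	 * ops->supported_ring_params. A rough sketch with hypothetical
+	 * foo_* names:
+	 *
+	 *	static void foo_get_ringparam(struct net_device *dev,
+	 *				      struct ethtool_ringparam *ring,
+	 *				      struct kernel_ethtool_ringparam *kring,
+	 *				      struct netlink_ext_ack *extack)
+	 *	{
+	 *		struct foo_priv *priv = netdev_priv(dev);
+	 *
+	 *		ring->rx_max_pending = FOO_MAX_RX_DESC;
+	 *		ring->tx_max_pending = FOO_MAX_TX_DESC;
+	 *		ring->rx_pending = priv->rx_ring_len;
+	 *		ring->tx_pending = priv->tx_ring_len;
+	 *		// only meaningful with ETHTOOL_RING_USE_RX_BUF_LEN:
+	 *		kring->rx_buf_len = priv->rx_buf_len;
+	 *	}
+	 */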
[ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN] = { .type = NLA_U32 }, + [ETHTOOL_A_RINGS_HDS_THRESH] = { .type = NLA_U32 }, +}; + +static int +ethnl_set_rings_validate(struct ethnl_req_info *req_info, + struct genl_info *info) +{ + const struct ethtool_ops *ops = req_info->dev->ethtool_ops; + struct nlattr **tb = info->attrs; + + if (tb[ETHTOOL_A_RINGS_RX_BUF_LEN] && + !(ops->supported_ring_params & ETHTOOL_RING_USE_RX_BUF_LEN)) { + NL_SET_ERR_MSG_ATTR(info->extack, + tb[ETHTOOL_A_RINGS_RX_BUF_LEN], + "setting rx buf len not supported"); + return -EOPNOTSUPP; + } + + if (tb[ETHTOOL_A_RINGS_TCP_DATA_SPLIT] && + !(ops->supported_ring_params & ETHTOOL_RING_USE_TCP_DATA_SPLIT)) { + NL_SET_ERR_MSG_ATTR(info->extack, + tb[ETHTOOL_A_RINGS_TCP_DATA_SPLIT], + "setting TCP data split is not supported"); + return -EOPNOTSUPP; + } + + if (tb[ETHTOOL_A_RINGS_HDS_THRESH] && + !(ops->supported_ring_params & ETHTOOL_RING_USE_HDS_THRS)) { + NL_SET_ERR_MSG_ATTR(info->extack, + tb[ETHTOOL_A_RINGS_HDS_THRESH], + "setting hds-thresh is not supported"); + return -EOPNOTSUPP; + } + + if (tb[ETHTOOL_A_RINGS_CQE_SIZE] && + !(ops->supported_ring_params & ETHTOOL_RING_USE_CQE_SIZE)) { + NL_SET_ERR_MSG_ATTR(info->extack, + tb[ETHTOOL_A_RINGS_CQE_SIZE], + "setting cqe size not supported"); + return -EOPNOTSUPP; + } + + if (tb[ETHTOOL_A_RINGS_TX_PUSH] && + !(ops->supported_ring_params & ETHTOOL_RING_USE_TX_PUSH)) { + NL_SET_ERR_MSG_ATTR(info->extack, + tb[ETHTOOL_A_RINGS_TX_PUSH], + "setting tx push not supported"); + return -EOPNOTSUPP; + } + + if (tb[ETHTOOL_A_RINGS_RX_PUSH] && + !(ops->supported_ring_params & ETHTOOL_RING_USE_RX_PUSH)) { + NL_SET_ERR_MSG_ATTR(info->extack, + tb[ETHTOOL_A_RINGS_RX_PUSH], + "setting rx push not supported"); + return -EOPNOTSUPP; + } + + if (tb[ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN] && + !(ops->supported_ring_params & ETHTOOL_RING_USE_TX_PUSH_BUF_LEN)) { + NL_SET_ERR_MSG_ATTR(info->extack, + tb[ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN], + "setting tx push buf len is not supported"); + return -EOPNOTSUPP; + } + + return ops->get_ringparam && ops->set_ringparam ? 
1 : -EOPNOTSUPP; +} + +static int +ethnl_set_rings(struct ethnl_req_info *req_info, struct genl_info *info) +{ + struct kernel_ethtool_ringparam kernel_ringparam; + struct net_device *dev = req_info->dev; + struct ethtool_ringparam ringparam; + struct nlattr **tb = info->attrs; + const struct nlattr *err_attr; + bool mod = false; + int ret; + + ethtool_ringparam_get_cfg(dev, &ringparam, &kernel_ringparam, + info->extack); + + ethnl_update_u32(&ringparam.rx_pending, tb[ETHTOOL_A_RINGS_RX], &mod); + ethnl_update_u32(&ringparam.rx_mini_pending, + tb[ETHTOOL_A_RINGS_RX_MINI], &mod); + ethnl_update_u32(&ringparam.rx_jumbo_pending, + tb[ETHTOOL_A_RINGS_RX_JUMBO], &mod); + ethnl_update_u32(&ringparam.tx_pending, tb[ETHTOOL_A_RINGS_TX], &mod); + ethnl_update_u32(&kernel_ringparam.rx_buf_len, + tb[ETHTOOL_A_RINGS_RX_BUF_LEN], &mod); + ethnl_update_u8(&kernel_ringparam.tcp_data_split, + tb[ETHTOOL_A_RINGS_TCP_DATA_SPLIT], &mod); + ethnl_update_u32(&kernel_ringparam.cqe_size, + tb[ETHTOOL_A_RINGS_CQE_SIZE], &mod); + ethnl_update_u8(&kernel_ringparam.tx_push, + tb[ETHTOOL_A_RINGS_TX_PUSH], &mod); + ethnl_update_u8(&kernel_ringparam.rx_push, + tb[ETHTOOL_A_RINGS_RX_PUSH], &mod); + ethnl_update_u32(&kernel_ringparam.tx_push_buf_len, + tb[ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN], &mod); + ethnl_update_u32(&kernel_ringparam.hds_thresh, + tb[ETHTOOL_A_RINGS_HDS_THRESH], &mod); + if (!mod) + return 0; + + if (kernel_ringparam.tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_ENABLED && + dev_xdp_sb_prog_count(dev)) { + NL_SET_ERR_MSG_ATTR(info->extack, + tb[ETHTOOL_A_RINGS_TCP_DATA_SPLIT], + "tcp-data-split can not be enabled with single buffer XDP"); + return -EINVAL; + } + + if (dev_get_min_mp_channel_count(dev)) { + if (kernel_ringparam.tcp_data_split != + ETHTOOL_TCP_DATA_SPLIT_ENABLED) { + NL_SET_ERR_MSG(info->extack, + "can't disable tcp-data-split while device has memory provider enabled"); + return -EINVAL; + } else if (kernel_ringparam.hds_thresh) { + NL_SET_ERR_MSG(info->extack, + "can't set non-zero hds_thresh while device is memory provider enabled"); + return -EINVAL; + } + } + + /* ensure new ring parameters are within limits */ + if (ringparam.rx_pending > ringparam.rx_max_pending) + err_attr = tb[ETHTOOL_A_RINGS_RX]; + else if (ringparam.rx_mini_pending > ringparam.rx_mini_max_pending) + err_attr = tb[ETHTOOL_A_RINGS_RX_MINI]; + else if (ringparam.rx_jumbo_pending > ringparam.rx_jumbo_max_pending) + err_attr = tb[ETHTOOL_A_RINGS_RX_JUMBO]; + else if (ringparam.tx_pending > ringparam.tx_max_pending) + err_attr = tb[ETHTOOL_A_RINGS_TX]; + else if (kernel_ringparam.hds_thresh > kernel_ringparam.hds_thresh_max) + err_attr = tb[ETHTOOL_A_RINGS_HDS_THRESH]; + else + err_attr = NULL; + if (err_attr) { + NL_SET_ERR_MSG_ATTR(info->extack, err_attr, + "requested ring size exceeds maximum"); + return -EINVAL; + } + + if (kernel_ringparam.tx_push_buf_len > kernel_ringparam.tx_push_buf_max_len) { + NL_SET_ERR_MSG_ATTR_FMT(info->extack, tb[ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN], + "Requested TX push buffer exceeds the maximum of %u", + kernel_ringparam.tx_push_buf_max_len); + + return -EINVAL; + } + + dev->cfg_pending->hds_config = kernel_ringparam.tcp_data_split; + dev->cfg_pending->hds_thresh = kernel_ringparam.hds_thresh; + + ret = dev->ethtool_ops->set_ringparam(dev, &ringparam, + &kernel_ringparam, info->extack); + return ret < 0 ? 
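+	/* The 0/1/negative convention documented for ethnl_request_ops::set
+	 * is what drives notifications: returning 1 here makes the generic
+	 * code emit ETHTOOL_MSG_RINGS_NTF (set_ntf_cmd below). "mod" in turn
+	 * comes from the ethnl_update_*() helpers, which behave roughly like
+	 * this sketch:
+	 *
+	 *	static inline void ethnl_update_u32(u32 *dst,
+	 *					    const struct nlattr *attr,
+	 *					    bool *mod)
+	 *	{
+	 *		u32 val;
+	 *
+	 *		if (!attr)
+	 *			return;		// attribute absent: keep old value
+	 *		val = nla_get_u32(attr);
+	 *		if (*dst == val)
+	 *			return;		// unchanged, *mod stays false
+	 *		*dst = val;
+	 *		*mod = true;
+	 *	}
+	 */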
ret : 1; +} + +const struct ethnl_request_ops ethnl_rings_request_ops = { + .request_cmd = ETHTOOL_MSG_RINGS_GET, + .reply_cmd = ETHTOOL_MSG_RINGS_GET_REPLY, + .hdr_attr = ETHTOOL_A_RINGS_HEADER, + .req_info_size = sizeof(struct rings_req_info), + .reply_data_size = sizeof(struct rings_reply_data), + + .prepare_data = rings_prepare_data, + .reply_size = rings_reply_size, + .fill_reply = rings_fill_reply, + + .set_validate = ethnl_set_rings_validate, + .set = ethnl_set_rings, + .set_ntf_cmd = ETHTOOL_MSG_RINGS_NTF, +}; diff --git a/net/ethtool/rss.c b/net/ethtool/rss.c new file mode 100644 index 000000000000..4dced53be4b3 --- /dev/null +++ b/net/ethtool/rss.c @@ -0,0 +1,1205 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include <net/netdev_lock.h> + +#include "netlink.h" +#include "common.h" + +struct rss_req_info { + struct ethnl_req_info base; + u32 rss_context; +}; + +struct rss_reply_data { + struct ethnl_reply_data base; + bool has_flow_hash; + bool no_key_fields; + u32 indir_size; + u32 hkey_size; + u32 hfunc; + u32 input_xfrm; + u32 *indir_table; + u8 *hkey; + int flow_hash[__ETHTOOL_A_FLOW_CNT]; +}; + +static const u8 ethtool_rxfh_ft_nl2ioctl[] = { + [ETHTOOL_A_FLOW_ETHER] = ETHER_FLOW, + [ETHTOOL_A_FLOW_IP4] = IPV4_FLOW, + [ETHTOOL_A_FLOW_IP6] = IPV6_FLOW, + [ETHTOOL_A_FLOW_TCP4] = TCP_V4_FLOW, + [ETHTOOL_A_FLOW_UDP4] = UDP_V4_FLOW, + [ETHTOOL_A_FLOW_SCTP4] = SCTP_V4_FLOW, + [ETHTOOL_A_FLOW_AH_ESP4] = AH_ESP_V4_FLOW, + [ETHTOOL_A_FLOW_TCP6] = TCP_V6_FLOW, + [ETHTOOL_A_FLOW_UDP6] = UDP_V6_FLOW, + [ETHTOOL_A_FLOW_SCTP6] = SCTP_V6_FLOW, + [ETHTOOL_A_FLOW_AH_ESP6] = AH_ESP_V6_FLOW, + [ETHTOOL_A_FLOW_AH4] = AH_V4_FLOW, + [ETHTOOL_A_FLOW_ESP4] = ESP_V4_FLOW, + [ETHTOOL_A_FLOW_AH6] = AH_V6_FLOW, + [ETHTOOL_A_FLOW_ESP6] = ESP_V6_FLOW, + [ETHTOOL_A_FLOW_GTPU4] = GTPU_V4_FLOW, + [ETHTOOL_A_FLOW_GTPU6] = GTPU_V6_FLOW, + [ETHTOOL_A_FLOW_GTPC4] = GTPC_V4_FLOW, + [ETHTOOL_A_FLOW_GTPC6] = GTPC_V6_FLOW, + [ETHTOOL_A_FLOW_GTPC_TEID4] = GTPC_TEID_V4_FLOW, + [ETHTOOL_A_FLOW_GTPC_TEID6] = GTPC_TEID_V6_FLOW, + [ETHTOOL_A_FLOW_GTPU_EH4] = GTPU_EH_V4_FLOW, + [ETHTOOL_A_FLOW_GTPU_EH6] = GTPU_EH_V6_FLOW, + [ETHTOOL_A_FLOW_GTPU_UL4] = GTPU_UL_V4_FLOW, + [ETHTOOL_A_FLOW_GTPU_UL6] = GTPU_UL_V6_FLOW, + [ETHTOOL_A_FLOW_GTPU_DL4] = GTPU_DL_V4_FLOW, + [ETHTOOL_A_FLOW_GTPU_DL6] = GTPU_DL_V6_FLOW, +}; + +#define RSS_REQINFO(__req_base) \ + container_of(__req_base, struct rss_req_info, base) + +#define RSS_REPDATA(__reply_base) \ + container_of(__reply_base, struct rss_reply_data, base) + +const struct nla_policy ethnl_rss_get_policy[] = { + [ETHTOOL_A_RSS_HEADER] = NLA_POLICY_NESTED(ethnl_header_policy), + [ETHTOOL_A_RSS_CONTEXT] = { .type = NLA_U32 }, + [ETHTOOL_A_RSS_START_CONTEXT] = { .type = NLA_U32 }, +}; + +static int +rss_parse_request(struct ethnl_req_info *req_info, struct nlattr **tb, + struct netlink_ext_ack *extack) +{ + struct rss_req_info *request = RSS_REQINFO(req_info); + + if (tb[ETHTOOL_A_RSS_CONTEXT]) + request->rss_context = nla_get_u32(tb[ETHTOOL_A_RSS_CONTEXT]); + if (tb[ETHTOOL_A_RSS_START_CONTEXT]) { + NL_SET_BAD_ATTR(extack, tb[ETHTOOL_A_RSS_START_CONTEXT]); + return -EINVAL; + } + + return 0; +} + +static void +rss_prepare_flow_hash(const struct rss_req_info *req, struct net_device *dev, + struct rss_reply_data *data, const struct genl_info *info) +{ + int i; + + data->has_flow_hash = false; + + if (!dev->ethtool_ops->get_rxfh_fields) + return; + if (req->rss_context && !dev->ethtool_ops->rxfh_per_ctx_fields) + return; + + mutex_lock(&dev->ethtool->rss_lock); + for (i = 1; i < 
__ETHTOOL_A_FLOW_CNT; i++) { + struct ethtool_rxfh_fields fields = { + .flow_type = ethtool_rxfh_ft_nl2ioctl[i], + .rss_context = req->rss_context, + }; + + if (dev->ethtool_ops->get_rxfh_fields(dev, &fields)) { + data->flow_hash[i] = -1; /* Unsupported */ + continue; + } + + data->flow_hash[i] = fields.data; + data->has_flow_hash = true; + } + mutex_unlock(&dev->ethtool->rss_lock); +} + +static int +rss_get_data_alloc(struct net_device *dev, struct rss_reply_data *data) +{ + const struct ethtool_ops *ops = dev->ethtool_ops; + u32 total_size, indir_bytes; + u8 *rss_config; + + data->indir_size = 0; + data->hkey_size = 0; + if (ops->get_rxfh_indir_size) + data->indir_size = ops->get_rxfh_indir_size(dev); + if (ops->get_rxfh_key_size) + data->hkey_size = ops->get_rxfh_key_size(dev); + + indir_bytes = data->indir_size * sizeof(u32); + total_size = indir_bytes + data->hkey_size; + rss_config = kzalloc(total_size, GFP_KERNEL); + if (!rss_config) + return -ENOMEM; + + if (data->indir_size) + data->indir_table = (u32 *)rss_config; + if (data->hkey_size) + data->hkey = rss_config + indir_bytes; + + return 0; +} + +static void rss_get_data_free(const struct rss_reply_data *data) +{ + kfree(data->indir_table); +} + +static int +rss_prepare_get(const struct rss_req_info *request, struct net_device *dev, + struct rss_reply_data *data, const struct genl_info *info) +{ + const struct ethtool_ops *ops = dev->ethtool_ops; + struct ethtool_rxfh_param rxfh = {}; + int ret; + + ret = ethnl_ops_begin(dev); + if (ret < 0) + return ret; + mutex_lock(&dev->ethtool->rss_lock); + + ret = rss_get_data_alloc(dev, data); + if (ret) + goto out_unlock; + + rxfh.indir_size = data->indir_size; + rxfh.indir = data->indir_table; + rxfh.key_size = data->hkey_size; + rxfh.key = data->hkey; + + ret = ops->get_rxfh(dev, &rxfh); + if (ret) + goto out_unlock; + + data->hfunc = rxfh.hfunc; + data->input_xfrm = rxfh.input_xfrm; +out_unlock: + mutex_unlock(&dev->ethtool->rss_lock); + ethnl_ops_complete(dev); + return ret; +} + +static void +__rss_prepare_ctx(struct net_device *dev, struct rss_reply_data *data, + struct ethtool_rxfh_context *ctx) +{ + if (WARN_ON_ONCE(data->indir_size != ctx->indir_size || + data->hkey_size != ctx->key_size)) + return; + + data->no_key_fields = !dev->ethtool_ops->rxfh_per_ctx_key; + + data->hfunc = ctx->hfunc; + data->input_xfrm = ctx->input_xfrm; + memcpy(data->indir_table, ethtool_rxfh_context_indir(ctx), + data->indir_size * sizeof(u32)); + if (data->hkey_size) + memcpy(data->hkey, ethtool_rxfh_context_key(ctx), + data->hkey_size); +} + +static int +rss_prepare_ctx(const struct rss_req_info *request, struct net_device *dev, + struct rss_reply_data *data, const struct genl_info *info) +{ + struct ethtool_rxfh_context *ctx; + u32 total_size, indir_bytes; + u8 *rss_config; + int ret; + + mutex_lock(&dev->ethtool->rss_lock); + ctx = xa_load(&dev->ethtool->rss_ctx, request->rss_context); + if (!ctx) { + ret = -ENOENT; + goto out_unlock; + } + + data->indir_size = ctx->indir_size; + data->hkey_size = ctx->key_size; + + indir_bytes = data->indir_size * sizeof(u32); + total_size = indir_bytes + data->hkey_size; + rss_config = kzalloc(total_size, GFP_KERNEL); + if (!rss_config) { + ret = -ENOMEM; + goto out_unlock; + } + + data->indir_table = (u32 *)rss_config; + if (data->hkey_size) + data->hkey = rss_config + indir_bytes; + + __rss_prepare_ctx(dev, data, ctx); + + ret = 0; +out_unlock: + mutex_unlock(&dev->ethtool->rss_lock); + return ret; +} + +static int +rss_prepare(const struct rss_req_info 
*request, struct net_device *dev, + struct rss_reply_data *data, const struct genl_info *info) +{ + rss_prepare_flow_hash(request, dev, data, info); + + /* Coming from RSS_SET, driver may only have flow_hash_fields ops */ + if (!dev->ethtool_ops->get_rxfh) + return 0; + + if (request->rss_context) + return rss_prepare_ctx(request, dev, data, info); + return rss_prepare_get(request, dev, data, info); +} + +static int +rss_prepare_data(const struct ethnl_req_info *req_base, + struct ethnl_reply_data *reply_base, + const struct genl_info *info) +{ + struct rss_reply_data *data = RSS_REPDATA(reply_base); + struct rss_req_info *request = RSS_REQINFO(req_base); + struct net_device *dev = reply_base->dev; + const struct ethtool_ops *ops; + + ops = dev->ethtool_ops; + if (!ops->get_rxfh) + return -EOPNOTSUPP; + + /* Some drivers don't handle rss_context */ + if (request->rss_context && !ops->create_rxfh_context) + return -EOPNOTSUPP; + + return rss_prepare(request, dev, data, info); +} + +static int +rss_reply_size(const struct ethnl_req_info *req_base, + const struct ethnl_reply_data *reply_base) +{ + const struct rss_reply_data *data = RSS_REPDATA(reply_base); + int len; + + len = nla_total_size(sizeof(u32)) + /* _RSS_CONTEXT */ + nla_total_size(sizeof(u32)) + /* _RSS_HFUNC */ + nla_total_size(sizeof(u32)) + /* _RSS_INPUT_XFRM */ + nla_total_size(sizeof(u32) * data->indir_size) + /* _RSS_INDIR */ + nla_total_size(data->hkey_size) + /* _RSS_HKEY */ + nla_total_size(0) + /* _RSS_FLOW_HASH */ + nla_total_size(sizeof(u32)) * ETHTOOL_A_FLOW_MAX + + 0; + + return len; +} + +static int +rss_fill_reply(struct sk_buff *skb, const struct ethnl_req_info *req_base, + const struct ethnl_reply_data *reply_base) +{ + const struct rss_reply_data *data = RSS_REPDATA(reply_base); + struct rss_req_info *request = RSS_REQINFO(req_base); + + if (request->rss_context && + nla_put_u32(skb, ETHTOOL_A_RSS_CONTEXT, request->rss_context)) + return -EMSGSIZE; + + if ((data->indir_size && + nla_put(skb, ETHTOOL_A_RSS_INDIR, + sizeof(u32) * data->indir_size, data->indir_table))) + return -EMSGSIZE; + + if (!data->no_key_fields && + ((data->hfunc && + nla_put_u32(skb, ETHTOOL_A_RSS_HFUNC, data->hfunc)) || + (data->input_xfrm && + nla_put_u32(skb, ETHTOOL_A_RSS_INPUT_XFRM, data->input_xfrm)) || + (data->hkey_size && + nla_put(skb, ETHTOOL_A_RSS_HKEY, data->hkey_size, data->hkey)))) + return -EMSGSIZE; + + if (data->has_flow_hash) { + struct nlattr *nest; + int i; + + nest = nla_nest_start(skb, ETHTOOL_A_RSS_FLOW_HASH); + if (!nest) + return -EMSGSIZE; + + for (i = 1; i < __ETHTOOL_A_FLOW_CNT; i++) { + if (data->flow_hash[i] >= 0 && + nla_put_uint(skb, i, data->flow_hash[i])) { + nla_nest_cancel(skb, nest); + return -EMSGSIZE; + } + } + + nla_nest_end(skb, nest); + } + + return 0; +} + +static void rss_cleanup_data(struct ethnl_reply_data *reply_base) +{ + const struct rss_reply_data *data = RSS_REPDATA(reply_base); + + rss_get_data_free(data); +} + +struct rss_nl_dump_ctx { + unsigned long ifindex; + unsigned long ctx_idx; + + /* User wants to only dump contexts from given ifindex */ + unsigned int match_ifindex; + unsigned int start_ctx; +}; + +static struct rss_nl_dump_ctx *rss_dump_ctx(struct netlink_callback *cb) +{ + NL_ASSERT_CTX_FITS(struct rss_nl_dump_ctx); + + return (struct rss_nl_dump_ctx *)cb->ctx; +} + +int ethnl_rss_dump_start(struct netlink_callback *cb) +{ + const struct genl_info *info = genl_info_dump(cb); + struct rss_nl_dump_ctx *ctx = rss_dump_ctx(cb); + struct ethnl_req_info req_info = {}; + struct nlattr 
**tb = info->attrs; + int ret; + + /* Filtering by context not supported */ + if (tb[ETHTOOL_A_RSS_CONTEXT]) { + NL_SET_BAD_ATTR(info->extack, tb[ETHTOOL_A_RSS_CONTEXT]); + return -EINVAL; + } + if (tb[ETHTOOL_A_RSS_START_CONTEXT]) { + ctx->start_ctx = nla_get_u32(tb[ETHTOOL_A_RSS_START_CONTEXT]); + ctx->ctx_idx = ctx->start_ctx; + } + + ret = ethnl_parse_header_dev_get(&req_info, + tb[ETHTOOL_A_RSS_HEADER], + sock_net(cb->skb->sk), cb->extack, + false); + if (req_info.dev) { + ctx->match_ifindex = req_info.dev->ifindex; + ctx->ifindex = ctx->match_ifindex; + ethnl_parse_header_dev_put(&req_info); + req_info.dev = NULL; + } + + return ret; +} + +static int +rss_dump_one_ctx(struct sk_buff *skb, struct netlink_callback *cb, + struct net_device *dev, u32 rss_context) +{ + const struct genl_info *info = genl_info_dump(cb); + struct rss_reply_data data = {}; + struct rss_req_info req = {}; + void *ehdr; + int ret; + + req.rss_context = rss_context; + + ehdr = ethnl_dump_put(skb, cb, ETHTOOL_MSG_RSS_GET_REPLY); + if (!ehdr) + return -EMSGSIZE; + + ret = ethnl_fill_reply_header(skb, dev, ETHTOOL_A_RSS_HEADER); + if (ret < 0) + goto err_cancel; + + ret = rss_prepare(&req, dev, &data, info); + if (ret) + goto err_cancel; + + ret = rss_fill_reply(skb, &req.base, &data.base); + if (ret) + goto err_cleanup; + genlmsg_end(skb, ehdr); + + rss_cleanup_data(&data.base); + return 0; + +err_cleanup: + rss_cleanup_data(&data.base); +err_cancel: + genlmsg_cancel(skb, ehdr); + return ret; +} + +static int +rss_dump_one_dev(struct sk_buff *skb, struct netlink_callback *cb, + struct net_device *dev) +{ + struct rss_nl_dump_ctx *ctx = rss_dump_ctx(cb); + int ret; + + if (!dev->ethtool_ops->get_rxfh) + return 0; + + if (!ctx->ctx_idx) { + ret = rss_dump_one_ctx(skb, cb, dev, 0); + if (ret) + return ret; + ctx->ctx_idx++; + } + + for (; xa_find(&dev->ethtool->rss_ctx, &ctx->ctx_idx, + ULONG_MAX, XA_PRESENT); ctx->ctx_idx++) { + ret = rss_dump_one_ctx(skb, cb, dev, ctx->ctx_idx); + if (ret) + return ret; + } + ctx->ctx_idx = ctx->start_ctx; + + return 0; +} + +int ethnl_rss_dumpit(struct sk_buff *skb, struct netlink_callback *cb) +{ + struct rss_nl_dump_ctx *ctx = rss_dump_ctx(cb); + struct net *net = sock_net(skb->sk); + struct net_device *dev; + int ret = 0; + + rtnl_lock(); + for_each_netdev_dump(net, dev, ctx->ifindex) { + if (ctx->match_ifindex && ctx->match_ifindex != ctx->ifindex) + break; + + netdev_lock_ops(dev); + ret = rss_dump_one_dev(skb, cb, dev); + netdev_unlock_ops(dev); + if (ret) + break; + } + rtnl_unlock(); + + return ret; +} + +/* RSS_NTF */ + +static void ethnl_rss_delete_notify(struct net_device *dev, u32 rss_context) +{ + struct sk_buff *ntf; + size_t ntf_size; + void *hdr; + + ntf_size = ethnl_reply_header_size() + + nla_total_size(sizeof(u32)); /* _RSS_CONTEXT */ + + ntf = genlmsg_new(ntf_size, GFP_KERNEL); + if (!ntf) + goto out_warn; + + hdr = ethnl_bcastmsg_put(ntf, ETHTOOL_MSG_RSS_DELETE_NTF); + if (!hdr) + goto out_free_ntf; + + if (ethnl_fill_reply_header(ntf, dev, ETHTOOL_A_RSS_HEADER) || + nla_put_u32(ntf, ETHTOOL_A_RSS_CONTEXT, rss_context)) + goto out_free_ntf; + + genlmsg_end(ntf, hdr); + if (ethnl_multicast(ntf, dev)) + goto out_warn; + + return; + +out_free_ntf: + nlmsg_free(ntf); +out_warn: + pr_warn_once("Failed to send a RSS delete notification"); +} + +void ethtool_rss_notify(struct net_device *dev, u32 type, u32 rss_context) +{ + struct rss_req_info req_info = { + .rss_context = rss_context, + }; + + if (type == ETHTOOL_MSG_RSS_DELETE_NTF) + ethnl_rss_delete_notify(dev, 
rss_context); + else + ethnl_notify(dev, type, &req_info.base); +} + +/* RSS_SET */ + +#define RFH_MASK (RXH_L2DA | RXH_VLAN | RXH_IP_SRC | RXH_IP_DST | \ + RXH_L3_PROTO | RXH_L4_B_0_1 | RXH_L4_B_2_3 | \ + RXH_GTP_TEID | RXH_DISCARD) +#define RFH_MASKv6 (RFH_MASK | RXH_IP6_FL) + +static const struct nla_policy ethnl_rss_flows_policy[] = { + [ETHTOOL_A_FLOW_ETHER] = NLA_POLICY_MASK(NLA_UINT, RFH_MASK), + [ETHTOOL_A_FLOW_IP4] = NLA_POLICY_MASK(NLA_UINT, RFH_MASK), + [ETHTOOL_A_FLOW_IP6] = NLA_POLICY_MASK(NLA_UINT, RFH_MASKv6), + [ETHTOOL_A_FLOW_TCP4] = NLA_POLICY_MASK(NLA_UINT, RFH_MASK), + [ETHTOOL_A_FLOW_UDP4] = NLA_POLICY_MASK(NLA_UINT, RFH_MASK), + [ETHTOOL_A_FLOW_SCTP4] = NLA_POLICY_MASK(NLA_UINT, RFH_MASK), + [ETHTOOL_A_FLOW_AH_ESP4] = NLA_POLICY_MASK(NLA_UINT, RFH_MASK), + [ETHTOOL_A_FLOW_TCP6] = NLA_POLICY_MASK(NLA_UINT, RFH_MASKv6), + [ETHTOOL_A_FLOW_UDP6] = NLA_POLICY_MASK(NLA_UINT, RFH_MASKv6), + [ETHTOOL_A_FLOW_SCTP6] = NLA_POLICY_MASK(NLA_UINT, RFH_MASKv6), + [ETHTOOL_A_FLOW_AH_ESP6] = NLA_POLICY_MASK(NLA_UINT, RFH_MASKv6), + [ETHTOOL_A_FLOW_AH4] = NLA_POLICY_MASK(NLA_UINT, RFH_MASK), + [ETHTOOL_A_FLOW_ESP4] = NLA_POLICY_MASK(NLA_UINT, RFH_MASK), + [ETHTOOL_A_FLOW_AH6] = NLA_POLICY_MASK(NLA_UINT, RFH_MASKv6), + [ETHTOOL_A_FLOW_ESP6] = NLA_POLICY_MASK(NLA_UINT, RFH_MASKv6), + [ETHTOOL_A_FLOW_GTPU4] = NLA_POLICY_MASK(NLA_UINT, RFH_MASK), + [ETHTOOL_A_FLOW_GTPU6] = NLA_POLICY_MASK(NLA_UINT, RFH_MASKv6), + [ETHTOOL_A_FLOW_GTPC4] = NLA_POLICY_MASK(NLA_UINT, RFH_MASK), + [ETHTOOL_A_FLOW_GTPC6] = NLA_POLICY_MASK(NLA_UINT, RFH_MASKv6), + [ETHTOOL_A_FLOW_GTPC_TEID4] = NLA_POLICY_MASK(NLA_UINT, RFH_MASK), + [ETHTOOL_A_FLOW_GTPC_TEID6] = NLA_POLICY_MASK(NLA_UINT, RFH_MASKv6), + [ETHTOOL_A_FLOW_GTPU_EH4] = NLA_POLICY_MASK(NLA_UINT, RFH_MASK), + [ETHTOOL_A_FLOW_GTPU_EH6] = NLA_POLICY_MASK(NLA_UINT, RFH_MASKv6), + [ETHTOOL_A_FLOW_GTPU_UL4] = NLA_POLICY_MASK(NLA_UINT, RFH_MASK), + [ETHTOOL_A_FLOW_GTPU_UL6] = NLA_POLICY_MASK(NLA_UINT, RFH_MASKv6), + [ETHTOOL_A_FLOW_GTPU_DL4] = NLA_POLICY_MASK(NLA_UINT, RFH_MASK), + [ETHTOOL_A_FLOW_GTPU_DL6] = NLA_POLICY_MASK(NLA_UINT, RFH_MASKv6), +}; + +const struct nla_policy ethnl_rss_set_policy[ETHTOOL_A_RSS_FLOW_HASH + 1] = { + [ETHTOOL_A_RSS_HEADER] = NLA_POLICY_NESTED(ethnl_header_policy), + [ETHTOOL_A_RSS_CONTEXT] = { .type = NLA_U32, }, + [ETHTOOL_A_RSS_HFUNC] = NLA_POLICY_MIN(NLA_U32, 1), + [ETHTOOL_A_RSS_INDIR] = { .type = NLA_BINARY, }, + [ETHTOOL_A_RSS_HKEY] = NLA_POLICY_MIN(NLA_BINARY, 1), + [ETHTOOL_A_RSS_INPUT_XFRM] = + NLA_POLICY_MAX(NLA_U32, RXH_XFRM_SYM_OR_XOR), + [ETHTOOL_A_RSS_FLOW_HASH] = NLA_POLICY_NESTED(ethnl_rss_flows_policy), +}; + +static int +ethnl_rss_set_validate(struct ethnl_req_info *req_info, struct genl_info *info) +{ + const struct ethtool_ops *ops = req_info->dev->ethtool_ops; + struct rss_req_info *request = RSS_REQINFO(req_info); + struct nlattr **tb = info->attrs; + struct nlattr *bad_attr = NULL; + u32 input_xfrm; + + if (request->rss_context && !ops->create_rxfh_context) + bad_attr = bad_attr ?: tb[ETHTOOL_A_RSS_CONTEXT]; + + if (request->rss_context && !ops->rxfh_per_ctx_key) { + bad_attr = bad_attr ?: tb[ETHTOOL_A_RSS_HFUNC]; + bad_attr = bad_attr ?: tb[ETHTOOL_A_RSS_HKEY]; + bad_attr = bad_attr ?: tb[ETHTOOL_A_RSS_INPUT_XFRM]; + } + + input_xfrm = nla_get_u32_default(tb[ETHTOOL_A_RSS_INPUT_XFRM], 0); + if (input_xfrm & ~ops->supported_input_xfrm) + bad_attr = bad_attr ?: tb[ETHTOOL_A_RSS_INPUT_XFRM]; + + if (tb[ETHTOOL_A_RSS_FLOW_HASH] && !ops->set_rxfh_fields) + bad_attr = bad_attr ?: tb[ETHTOOL_A_RSS_FLOW_HASH]; + 
if (request->rss_context && + tb[ETHTOOL_A_RSS_FLOW_HASH] && !ops->rxfh_per_ctx_fields) + bad_attr = bad_attr ?: tb[ETHTOOL_A_RSS_FLOW_HASH]; + + if (bad_attr) { + NL_SET_BAD_ATTR(info->extack, bad_attr); + return -EOPNOTSUPP; + } + + return 1; +} + +static int +rss_set_prep_indir(struct net_device *dev, struct genl_info *info, + struct rss_reply_data *data, struct ethtool_rxfh_param *rxfh, + bool *reset, bool *mod) +{ + struct netlink_ext_ack *extack = info->extack; + struct nlattr **tb = info->attrs; + size_t alloc_size; + int num_rx_rings; + u32 user_size; + int i, err; + + if (!tb[ETHTOOL_A_RSS_INDIR]) + return 0; + if (!data->indir_size) + return -EOPNOTSUPP; + + err = ethtool_get_rx_ring_count(dev); + if (err < 0) + return err; + num_rx_rings = err; + + if (nla_len(tb[ETHTOOL_A_RSS_INDIR]) % 4) { + NL_SET_BAD_ATTR(info->extack, tb[ETHTOOL_A_RSS_INDIR]); + return -EINVAL; + } + user_size = nla_len(tb[ETHTOOL_A_RSS_INDIR]) / 4; + if (!user_size) { + if (rxfh->rss_context) { + NL_SET_ERR_MSG_ATTR(extack, tb[ETHTOOL_A_RSS_INDIR], + "can't reset table for a context"); + return -EINVAL; + } + *reset = true; + } else if (data->indir_size % user_size) { + NL_SET_ERR_MSG_ATTR_FMT(extack, tb[ETHTOOL_A_RSS_INDIR], + "size (%d) mismatch with device indir table (%d)", + user_size, data->indir_size); + return -EINVAL; + } + + rxfh->indir_size = data->indir_size; + alloc_size = array_size(data->indir_size, sizeof(rxfh->indir[0])); + rxfh->indir = kzalloc(alloc_size, GFP_KERNEL); + if (!rxfh->indir) + return -ENOMEM; + + nla_memcpy(rxfh->indir, tb[ETHTOOL_A_RSS_INDIR], alloc_size); + for (i = 0; i < user_size; i++) { + if (rxfh->indir[i] < num_rx_rings) + continue; + + NL_SET_ERR_MSG_ATTR_FMT(extack, tb[ETHTOOL_A_RSS_INDIR], + "entry %d: queue out of range (%d)", + i, rxfh->indir[i]); + err = -EINVAL; + goto err_free; + } + + if (user_size) { + /* Replicate the user-provided table to fill the device table */ + for (i = user_size; i < data->indir_size; i++) + rxfh->indir[i] = rxfh->indir[i % user_size]; + } else { + for (i = 0; i < data->indir_size; i++) + rxfh->indir[i] = + ethtool_rxfh_indir_default(i, num_rx_rings); + } + + *mod |= memcmp(rxfh->indir, data->indir_table, data->indir_size); + + return 0; + +err_free: + kfree(rxfh->indir); + rxfh->indir = NULL; + return err; +} + +static int +rss_set_prep_hkey(struct net_device *dev, struct genl_info *info, + struct rss_reply_data *data, struct ethtool_rxfh_param *rxfh, + bool *mod) +{ + struct nlattr **tb = info->attrs; + + if (!tb[ETHTOOL_A_RSS_HKEY]) + return 0; + + if (nla_len(tb[ETHTOOL_A_RSS_HKEY]) != data->hkey_size) { + NL_SET_BAD_ATTR(info->extack, tb[ETHTOOL_A_RSS_HKEY]); + return -EINVAL; + } + + rxfh->key_size = data->hkey_size; + rxfh->key = kmemdup(data->hkey, data->hkey_size, GFP_KERNEL); + if (!rxfh->key) + return -ENOMEM; + + ethnl_update_binary(rxfh->key, rxfh->key_size, tb[ETHTOOL_A_RSS_HKEY], + mod); + return 0; +} + +static int +rss_check_rxfh_fields_sym(struct net_device *dev, struct genl_info *info, + struct rss_reply_data *data, bool xfrm_sym) +{ + struct nlattr **tb = info->attrs; + int i; + + if (!xfrm_sym) + return 0; + if (!data->has_flow_hash) { + NL_SET_ERR_MSG_ATTR(info->extack, tb[ETHTOOL_A_RSS_INPUT_XFRM], + "hash field config not reported"); + return -EINVAL; + } + + for (i = 1; i < __ETHTOOL_A_FLOW_CNT; i++) + if (data->flow_hash[i] >= 0 && + !ethtool_rxfh_config_is_sym(data->flow_hash[i])) { + NL_SET_ERR_MSG_ATTR(info->extack, + tb[ETHTOOL_A_RSS_INPUT_XFRM], + "hash field config is not symmetric"); + return 
-EINVAL; + } + + return 0; +} + +static int +ethnl_set_rss_fields(struct net_device *dev, struct genl_info *info, + u32 rss_context, struct rss_reply_data *data, + bool xfrm_sym, bool *mod) +{ + struct nlattr *flow_nest = info->attrs[ETHTOOL_A_RSS_FLOW_HASH]; + struct nlattr *flows[ETHTOOL_A_FLOW_MAX + 1]; + const struct ethtool_ops *ops; + int i, ret; + + ops = dev->ethtool_ops; + + ret = rss_check_rxfh_fields_sym(dev, info, data, xfrm_sym); + if (ret) + return ret; + + if (!flow_nest) + return 0; + + ret = nla_parse_nested(flows, ARRAY_SIZE(ethnl_rss_flows_policy) - 1, + flow_nest, ethnl_rss_flows_policy, info->extack); + if (ret < 0) + return ret; + + for (i = 1; i < __ETHTOOL_A_FLOW_CNT; i++) { + struct ethtool_rxfh_fields fields = { + .flow_type = ethtool_rxfh_ft_nl2ioctl[i], + .rss_context = rss_context, + }; + + if (!flows[i]) + continue; + + fields.data = nla_get_u32(flows[i]); + if (data->has_flow_hash && data->flow_hash[i] == fields.data) + continue; + + if (xfrm_sym && !ethtool_rxfh_config_is_sym(fields.data)) { + NL_SET_ERR_MSG_ATTR(info->extack, flows[i], + "conflict with xfrm-input"); + return -EINVAL; + } + + ret = ops->set_rxfh_fields(dev, &fields, info->extack); + if (ret) + return ret; + + *mod = true; + } + + return 0; +} + +static void +rss_set_ctx_update(struct ethtool_rxfh_context *ctx, struct nlattr **tb, + struct rss_reply_data *data, struct ethtool_rxfh_param *rxfh) +{ + int i; + + if (rxfh->indir) { + for (i = 0; i < data->indir_size; i++) + ethtool_rxfh_context_indir(ctx)[i] = rxfh->indir[i]; + ctx->indir_configured = !!nla_len(tb[ETHTOOL_A_RSS_INDIR]); + } + if (rxfh->key) { + memcpy(ethtool_rxfh_context_key(ctx), rxfh->key, + data->hkey_size); + ctx->key_configured = !!rxfh->key_size; + } + if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE) + ctx->hfunc = rxfh->hfunc; + if (rxfh->input_xfrm != RXH_XFRM_NO_CHANGE) + ctx->input_xfrm = rxfh->input_xfrm; +} + +static int +ethnl_rss_set(struct ethnl_req_info *req_info, struct genl_info *info) +{ + bool indir_reset = false, indir_mod, xfrm_sym = false; + struct rss_req_info *request = RSS_REQINFO(req_info); + struct ethtool_rxfh_context *ctx = NULL; + struct net_device *dev = req_info->dev; + bool mod = false, fields_mod = false; + struct ethtool_rxfh_param rxfh = {}; + struct nlattr **tb = info->attrs; + struct rss_reply_data data = {}; + const struct ethtool_ops *ops; + int ret; + + ops = dev->ethtool_ops; + data.base.dev = dev; + + ret = rss_prepare(request, dev, &data, info); + if (ret) + return ret; + + rxfh.rss_context = request->rss_context; + + ret = rss_set_prep_indir(dev, info, &data, &rxfh, &indir_reset, &mod); + if (ret) + goto exit_clean_data; + indir_mod = !!tb[ETHTOOL_A_RSS_INDIR]; + + rxfh.hfunc = data.hfunc; + ethnl_update_u8(&rxfh.hfunc, tb[ETHTOOL_A_RSS_HFUNC], &mod); + if (rxfh.hfunc == data.hfunc) + rxfh.hfunc = ETH_RSS_HASH_NO_CHANGE; + + ret = rss_set_prep_hkey(dev, info, &data, &rxfh, &mod); + if (ret) + goto exit_free_indir; + + rxfh.input_xfrm = data.input_xfrm; + ethnl_update_u8(&rxfh.input_xfrm, tb[ETHTOOL_A_RSS_INPUT_XFRM], &mod); + /* For drivers which don't support input_xfrm it will be set to 0xff + * in the RSS context info. In all other case input_xfrm != 0 means + * symmetric hashing is requested. 
+ */ + if (!request->rss_context || ops->rxfh_per_ctx_key) + xfrm_sym = rxfh.input_xfrm || data.input_xfrm; + if (rxfh.input_xfrm == data.input_xfrm) + rxfh.input_xfrm = RXH_XFRM_NO_CHANGE; + + mutex_lock(&dev->ethtool->rss_lock); + if (request->rss_context) { + ctx = xa_load(&dev->ethtool->rss_ctx, request->rss_context); + if (!ctx) { + ret = -ENOENT; + goto exit_unlock; + } + } + + ret = ethnl_set_rss_fields(dev, info, request->rss_context, + &data, xfrm_sym, &fields_mod); + if (ret) + goto exit_unlock; + + if (!mod) + ret = 0; /* nothing to tell the driver */ + else if (!ops->set_rxfh) + ret = -EOPNOTSUPP; + else if (!rxfh.rss_context) + ret = ops->set_rxfh(dev, &rxfh, info->extack); + else + ret = ops->modify_rxfh_context(dev, ctx, &rxfh, info->extack); + if (ret) + goto exit_unlock; + + if (ctx) + rss_set_ctx_update(ctx, tb, &data, &rxfh); + else if (indir_reset) + dev->priv_flags &= ~IFF_RXFH_CONFIGURED; + else if (indir_mod) + dev->priv_flags |= IFF_RXFH_CONFIGURED; + +exit_unlock: + mutex_unlock(&dev->ethtool->rss_lock); + kfree(rxfh.key); +exit_free_indir: + kfree(rxfh.indir); +exit_clean_data: + rss_cleanup_data(&data.base); + + return ret ?: mod || fields_mod; +} + +const struct ethnl_request_ops ethnl_rss_request_ops = { + .request_cmd = ETHTOOL_MSG_RSS_GET, + .reply_cmd = ETHTOOL_MSG_RSS_GET_REPLY, + .hdr_attr = ETHTOOL_A_RSS_HEADER, + .req_info_size = sizeof(struct rss_req_info), + .reply_data_size = sizeof(struct rss_reply_data), + + .parse_request = rss_parse_request, + .prepare_data = rss_prepare_data, + .reply_size = rss_reply_size, + .fill_reply = rss_fill_reply, + .cleanup_data = rss_cleanup_data, + + .set_validate = ethnl_rss_set_validate, + .set = ethnl_rss_set, + .set_ntf_cmd = ETHTOOL_MSG_RSS_NTF, +}; + +/* RSS_CREATE */ + +const struct nla_policy ethnl_rss_create_policy[ETHTOOL_A_RSS_INPUT_XFRM + 1] = { + [ETHTOOL_A_RSS_HEADER] = NLA_POLICY_NESTED(ethnl_header_policy), + [ETHTOOL_A_RSS_CONTEXT] = NLA_POLICY_MIN(NLA_U32, 1), + [ETHTOOL_A_RSS_HFUNC] = NLA_POLICY_MIN(NLA_U32, 1), + [ETHTOOL_A_RSS_INDIR] = NLA_POLICY_MIN(NLA_BINARY, 1), + [ETHTOOL_A_RSS_HKEY] = NLA_POLICY_MIN(NLA_BINARY, 1), + [ETHTOOL_A_RSS_INPUT_XFRM] = + NLA_POLICY_MAX(NLA_U32, RXH_XFRM_SYM_OR_XOR), +}; + +static int +ethnl_rss_create_validate(struct net_device *dev, struct genl_info *info) +{ + const struct ethtool_ops *ops = dev->ethtool_ops; + struct nlattr **tb = info->attrs; + struct nlattr *bad_attr = NULL; + u32 rss_context, input_xfrm; + + if (!ops->create_rxfh_context) + return -EOPNOTSUPP; + + rss_context = nla_get_u32_default(tb[ETHTOOL_A_RSS_CONTEXT], 0); + if (ops->rxfh_max_num_contexts && + ops->rxfh_max_num_contexts <= rss_context) { + NL_SET_BAD_ATTR(info->extack, tb[ETHTOOL_A_RSS_CONTEXT]); + return -ERANGE; + } + + if (!ops->rxfh_per_ctx_key) { + bad_attr = bad_attr ?: tb[ETHTOOL_A_RSS_HFUNC]; + bad_attr = bad_attr ?: tb[ETHTOOL_A_RSS_HKEY]; + bad_attr = bad_attr ?: tb[ETHTOOL_A_RSS_INPUT_XFRM]; + } + + input_xfrm = nla_get_u32_default(tb[ETHTOOL_A_RSS_INPUT_XFRM], 0); + if (input_xfrm & ~ops->supported_input_xfrm) + bad_attr = bad_attr ?: tb[ETHTOOL_A_RSS_INPUT_XFRM]; + + if (bad_attr) { + NL_SET_BAD_ATTR(info->extack, bad_attr); + return -EOPNOTSUPP; + } + + return 0; +} + +static void +ethnl_rss_create_send_ntf(struct sk_buff *rsp, struct net_device *dev) +{ + struct nlmsghdr *nlh = (void *)rsp->data; + struct genlmsghdr *genl_hdr; + + /* Convert the reply into a notification */ + nlh->nlmsg_pid = 0; + nlh->nlmsg_seq = ethnl_bcast_seq_next(); + + genl_hdr = nlmsg_data(nlh); + 
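+	/* The attribute payload built for the unicast reply is reused as-is;
+	 * only the netlink pid/seq above and the genetlink command below are
+	 * rewritten for the broadcast notification.
+	 */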
genl_hdr->cmd = ETHTOOL_MSG_RSS_CREATE_NTF; + + ethnl_multicast(rsp, dev); +} + +int ethnl_rss_create_doit(struct sk_buff *skb, struct genl_info *info) +{ + bool indir_dflt = false, mod = false, ntf_fail = false; + struct ethtool_rxfh_param rxfh = {}; + struct ethtool_rxfh_context *ctx; + struct nlattr **tb = info->attrs; + struct rss_reply_data data = {}; + const struct ethtool_ops *ops; + struct rss_req_info req = {}; + struct net_device *dev; + struct sk_buff *rsp; + void *hdr; + u32 limit; + int ret; + + rsp = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); + if (!rsp) + return -ENOMEM; + + ret = ethnl_parse_header_dev_get(&req.base, tb[ETHTOOL_A_RSS_HEADER], + genl_info_net(info), info->extack, + true); + if (ret < 0) + goto exit_free_rsp; + + dev = req.base.dev; + ops = dev->ethtool_ops; + + req.rss_context = nla_get_u32_default(tb[ETHTOOL_A_RSS_CONTEXT], 0); + + ret = ethnl_rss_create_validate(dev, info); + if (ret) + goto exit_free_dev; + + rtnl_lock(); + netdev_lock_ops(dev); + + ret = ethnl_ops_begin(dev); + if (ret < 0) + goto exit_dev_unlock; + + ret = rss_get_data_alloc(dev, &data); + if (ret) + goto exit_ops; + + ret = rss_set_prep_indir(dev, info, &data, &rxfh, &indir_dflt, &mod); + if (ret) + goto exit_clean_data; + + ethnl_update_u8(&rxfh.hfunc, tb[ETHTOOL_A_RSS_HFUNC], &mod); + + ret = rss_set_prep_hkey(dev, info, &data, &rxfh, &mod); + if (ret) + goto exit_free_indir; + + rxfh.input_xfrm = RXH_XFRM_NO_CHANGE; + ethnl_update_u8(&rxfh.input_xfrm, tb[ETHTOOL_A_RSS_INPUT_XFRM], &mod); + + ctx = ethtool_rxfh_ctx_alloc(ops, data.indir_size, data.hkey_size); + if (!ctx) { + ret = -ENOMEM; + goto exit_free_hkey; + } + + mutex_lock(&dev->ethtool->rss_lock); + if (!req.rss_context) { + limit = ops->rxfh_max_num_contexts ?: U32_MAX; + ret = xa_alloc(&dev->ethtool->rss_ctx, &req.rss_context, ctx, + XA_LIMIT(1, limit - 1), GFP_KERNEL_ACCOUNT); + } else { + ret = xa_insert(&dev->ethtool->rss_ctx, + req.rss_context, ctx, GFP_KERNEL_ACCOUNT); + } + if (ret < 0) { + NL_SET_ERR_MSG_ATTR(info->extack, tb[ETHTOOL_A_RSS_CONTEXT], + "error allocating context ID"); + goto err_unlock_free_ctx; + } + rxfh.rss_context = req.rss_context; + + ret = ops->create_rxfh_context(dev, ctx, &rxfh, info->extack); + if (ret) + goto err_ctx_id_free; + + /* Make sure driver populates defaults */ + WARN_ON_ONCE(!rxfh.key && ops->rxfh_per_ctx_key && + !memchr_inv(ethtool_rxfh_context_key(ctx), 0, + ctx->key_size)); + + /* Store the config from rxfh to Xarray.. */ + rss_set_ctx_update(ctx, tb, &data, &rxfh); + /* .. copy from Xarray to data. */ + __rss_prepare_ctx(dev, &data, ctx); + + hdr = ethnl_unicast_put(rsp, info->snd_portid, info->snd_seq, + ETHTOOL_MSG_RSS_CREATE_ACT_REPLY); + ntf_fail = ethnl_fill_reply_header(rsp, dev, ETHTOOL_A_RSS_HEADER); + ntf_fail |= rss_fill_reply(rsp, &req.base, &data.base); + if (WARN_ON(!hdr || ntf_fail)) { + ret = -EMSGSIZE; + goto exit_unlock; + } + + genlmsg_end(rsp, hdr); + + /* Use the same skb for the response and the notification, + * genlmsg_reply() will copy the skb if it has elevated user count. 
+ */ + skb_get(rsp); + ret = genlmsg_reply(rsp, info); + ethnl_rss_create_send_ntf(rsp, dev); + rsp = NULL; + +exit_unlock: + mutex_unlock(&dev->ethtool->rss_lock); +exit_free_hkey: + kfree(rxfh.key); +exit_free_indir: + kfree(rxfh.indir); +exit_clean_data: + rss_get_data_free(&data); +exit_ops: + ethnl_ops_complete(dev); +exit_dev_unlock: + netdev_unlock_ops(dev); + rtnl_unlock(); +exit_free_dev: + ethnl_parse_header_dev_put(&req.base); +exit_free_rsp: + nlmsg_free(rsp); + return ret; + +err_ctx_id_free: + xa_erase(&dev->ethtool->rss_ctx, req.rss_context); +err_unlock_free_ctx: + kfree(ctx); + goto exit_unlock; +} + +/* RSS_DELETE */ + +const struct nla_policy ethnl_rss_delete_policy[ETHTOOL_A_RSS_CONTEXT + 1] = { + [ETHTOOL_A_RSS_HEADER] = NLA_POLICY_NESTED(ethnl_header_policy), + [ETHTOOL_A_RSS_CONTEXT] = NLA_POLICY_MIN(NLA_U32, 1), +}; + +int ethnl_rss_delete_doit(struct sk_buff *skb, struct genl_info *info) +{ + struct ethtool_rxfh_context *ctx; + struct nlattr **tb = info->attrs; + struct ethnl_req_info req = {}; + const struct ethtool_ops *ops; + struct net_device *dev; + u32 rss_context; + int ret; + + if (GENL_REQ_ATTR_CHECK(info, ETHTOOL_A_RSS_CONTEXT)) + return -EINVAL; + rss_context = nla_get_u32(tb[ETHTOOL_A_RSS_CONTEXT]); + + ret = ethnl_parse_header_dev_get(&req, tb[ETHTOOL_A_RSS_HEADER], + genl_info_net(info), info->extack, + true); + if (ret < 0) + return ret; + + dev = req.dev; + ops = dev->ethtool_ops; + + if (!ops->create_rxfh_context) + goto exit_free_dev; + + rtnl_lock(); + netdev_lock_ops(dev); + + ret = ethnl_ops_begin(dev); + if (ret < 0) + goto exit_dev_unlock; + + mutex_lock(&dev->ethtool->rss_lock); + ret = ethtool_check_rss_ctx_busy(dev, rss_context); + if (ret) + goto exit_unlock; + + ctx = xa_load(&dev->ethtool->rss_ctx, rss_context); + if (!ctx) { + ret = -ENOENT; + goto exit_unlock; + } + + ret = ops->remove_rxfh_context(dev, ctx, rss_context, info->extack); + if (ret) + goto exit_unlock; + + WARN_ON(xa_erase(&dev->ethtool->rss_ctx, rss_context) != ctx); + kfree(ctx); + + ethnl_rss_delete_notify(dev, rss_context); + +exit_unlock: + mutex_unlock(&dev->ethtool->rss_lock); + ethnl_ops_complete(dev); +exit_dev_unlock: + netdev_unlock_ops(dev); + rtnl_unlock(); +exit_free_dev: + ethnl_parse_header_dev_put(&req); + return ret; +} diff --git a/net/ethtool/stats.c b/net/ethtool/stats.c new file mode 100644 index 000000000000..3ca8eb2a3b31 --- /dev/null +++ b/net/ethtool/stats.c @@ -0,0 +1,623 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include <linux/phy.h> +#include <linux/phylib_stubs.h> + +#include "netlink.h" +#include "common.h" +#include "bitset.h" + +struct stats_req_info { + struct ethnl_req_info base; + DECLARE_BITMAP(stat_mask, __ETHTOOL_STATS_CNT); + enum ethtool_mac_stats_src src; +}; + +#define STATS_REQINFO(__req_base) \ + container_of(__req_base, struct stats_req_info, base) + +struct stats_reply_data { + struct ethnl_reply_data base; + struct_group(stats, + struct ethtool_eth_phy_stats phy_stats; + struct ethtool_eth_mac_stats mac_stats; + struct ethtool_eth_ctrl_stats ctrl_stats; + struct ethtool_rmon_stats rmon_stats; + struct ethtool_phy_stats phydev_stats; + ); + const struct ethtool_rmon_hist_range *rmon_ranges; +}; + +#define STATS_REPDATA(__reply_base) \ + container_of(__reply_base, struct stats_reply_data, base) + +const char stats_std_names[__ETHTOOL_STATS_CNT][ETH_GSTRING_LEN] = { + [ETHTOOL_STATS_ETH_PHY] = "eth-phy", + [ETHTOOL_STATS_ETH_MAC] = "eth-mac", + [ETHTOOL_STATS_ETH_CTRL] = "eth-ctrl", + [ETHTOOL_STATS_RMON] = "rmon", + 
[ETHTOOL_STATS_PHY] = "phydev", +}; + +const char stats_eth_phy_names[__ETHTOOL_A_STATS_ETH_PHY_CNT][ETH_GSTRING_LEN] = { + [ETHTOOL_A_STATS_ETH_PHY_5_SYM_ERR] = "SymbolErrorDuringCarrier", +}; + +const char stats_eth_mac_names[__ETHTOOL_A_STATS_ETH_MAC_CNT][ETH_GSTRING_LEN] = { + [ETHTOOL_A_STATS_ETH_MAC_2_TX_PKT] = "FramesTransmittedOK", + [ETHTOOL_A_STATS_ETH_MAC_3_SINGLE_COL] = "SingleCollisionFrames", + [ETHTOOL_A_STATS_ETH_MAC_4_MULTI_COL] = "MultipleCollisionFrames", + [ETHTOOL_A_STATS_ETH_MAC_5_RX_PKT] = "FramesReceivedOK", + [ETHTOOL_A_STATS_ETH_MAC_6_FCS_ERR] = "FrameCheckSequenceErrors", + [ETHTOOL_A_STATS_ETH_MAC_7_ALIGN_ERR] = "AlignmentErrors", + [ETHTOOL_A_STATS_ETH_MAC_8_TX_BYTES] = "OctetsTransmittedOK", + [ETHTOOL_A_STATS_ETH_MAC_9_TX_DEFER] = "FramesWithDeferredXmissions", + [ETHTOOL_A_STATS_ETH_MAC_10_LATE_COL] = "LateCollisions", + [ETHTOOL_A_STATS_ETH_MAC_11_XS_COL] = "FramesAbortedDueToXSColls", + [ETHTOOL_A_STATS_ETH_MAC_12_TX_INT_ERR] = "FramesLostDueToIntMACXmitError", + [ETHTOOL_A_STATS_ETH_MAC_13_CS_ERR] = "CarrierSenseErrors", + [ETHTOOL_A_STATS_ETH_MAC_14_RX_BYTES] = "OctetsReceivedOK", + [ETHTOOL_A_STATS_ETH_MAC_15_RX_INT_ERR] = "FramesLostDueToIntMACRcvError", + [ETHTOOL_A_STATS_ETH_MAC_18_TX_MCAST] = "MulticastFramesXmittedOK", + [ETHTOOL_A_STATS_ETH_MAC_19_TX_BCAST] = "BroadcastFramesXmittedOK", + [ETHTOOL_A_STATS_ETH_MAC_20_XS_DEFER] = "FramesWithExcessiveDeferral", + [ETHTOOL_A_STATS_ETH_MAC_21_RX_MCAST] = "MulticastFramesReceivedOK", + [ETHTOOL_A_STATS_ETH_MAC_22_RX_BCAST] = "BroadcastFramesReceivedOK", + [ETHTOOL_A_STATS_ETH_MAC_23_IR_LEN_ERR] = "InRangeLengthErrors", + [ETHTOOL_A_STATS_ETH_MAC_24_OOR_LEN] = "OutOfRangeLengthField", + [ETHTOOL_A_STATS_ETH_MAC_25_TOO_LONG_ERR] = "FrameTooLongErrors", +}; + +const char stats_eth_ctrl_names[__ETHTOOL_A_STATS_ETH_CTRL_CNT][ETH_GSTRING_LEN] = { + [ETHTOOL_A_STATS_ETH_CTRL_3_TX] = "MACControlFramesTransmitted", + [ETHTOOL_A_STATS_ETH_CTRL_4_RX] = "MACControlFramesReceived", + [ETHTOOL_A_STATS_ETH_CTRL_5_RX_UNSUP] = "UnsupportedOpcodesReceived", +}; + +const char stats_rmon_names[__ETHTOOL_A_STATS_RMON_CNT][ETH_GSTRING_LEN] = { + [ETHTOOL_A_STATS_RMON_UNDERSIZE] = "etherStatsUndersizePkts", + [ETHTOOL_A_STATS_RMON_OVERSIZE] = "etherStatsOversizePkts", + [ETHTOOL_A_STATS_RMON_FRAG] = "etherStatsFragments", + [ETHTOOL_A_STATS_RMON_JABBER] = "etherStatsJabbers", +}; + +const char stats_phy_names[__ETHTOOL_A_STATS_PHY_CNT][ETH_GSTRING_LEN] = { + [ETHTOOL_A_STATS_PHY_RX_PKTS] = "RxFrames", + [ETHTOOL_A_STATS_PHY_RX_BYTES] = "RxOctets", + [ETHTOOL_A_STATS_PHY_RX_ERRORS] = "RxErrors", + [ETHTOOL_A_STATS_PHY_TX_PKTS] = "TxFrames", + [ETHTOOL_A_STATS_PHY_TX_BYTES] = "TxOctets", + [ETHTOOL_A_STATS_PHY_TX_ERRORS] = "TxErrors", +}; + +const struct nla_policy ethnl_stats_get_policy[ETHTOOL_A_STATS_SRC + 1] = { + [ETHTOOL_A_STATS_HEADER] = + NLA_POLICY_NESTED(ethnl_header_policy), + [ETHTOOL_A_STATS_GROUPS] = { .type = NLA_NESTED }, + [ETHTOOL_A_STATS_SRC] = + NLA_POLICY_MAX(NLA_U32, ETHTOOL_MAC_STATS_SRC_PMAC), +}; + +static int stats_parse_request(struct ethnl_req_info *req_base, + struct nlattr **tb, + struct netlink_ext_ack *extack) +{ + enum ethtool_mac_stats_src src = ETHTOOL_MAC_STATS_SRC_AGGREGATE; + struct stats_req_info *req_info = STATS_REQINFO(req_base); + bool mod = false; + int err; + + err = ethnl_update_bitset(req_info->stat_mask, __ETHTOOL_STATS_CNT, + tb[ETHTOOL_A_STATS_GROUPS], stats_std_names, + extack, &mod); + if (err) + return err; + + if (!mod) { + NL_SET_ERR_MSG(extack, "no stats requested"); + 
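+		/* An absent or empty ETHTOOL_A_STATS_GROUPS bitset selects no
+		 * stat groups, so reject the request rather than returning an
+		 * empty reply.
+		 */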
return -EINVAL; + } + + if (tb[ETHTOOL_A_STATS_SRC]) + src = nla_get_u32(tb[ETHTOOL_A_STATS_SRC]); + + req_info->src = src; + + return 0; +} + +static int stats_prepare_data(const struct ethnl_req_info *req_base, + struct ethnl_reply_data *reply_base, + const struct genl_info *info) +{ + const struct stats_req_info *req_info = STATS_REQINFO(req_base); + struct stats_reply_data *data = STATS_REPDATA(reply_base); + enum ethtool_mac_stats_src src = req_info->src; + struct net_device *dev = reply_base->dev; + struct nlattr **tb = info->attrs; + struct phy_device *phydev; + int ret; + + phydev = ethnl_req_get_phydev(req_base, tb, ETHTOOL_A_STATS_HEADER, + info->extack); + if (IS_ERR(phydev)) + return PTR_ERR(phydev); + + ret = ethnl_ops_begin(dev); + if (ret < 0) + return ret; + + if ((src == ETHTOOL_MAC_STATS_SRC_EMAC || + src == ETHTOOL_MAC_STATS_SRC_PMAC) && + !__ethtool_dev_mm_supported(dev)) { + NL_SET_ERR_MSG_MOD(info->extack, + "Device does not support MAC merge layer"); + ethnl_ops_complete(dev); + return -EOPNOTSUPP; + } + + /* Mark all stats as unset (see ETHTOOL_STAT_NOT_SET) to prevent them + * from being reported to user space in case driver did not set them. + */ + memset(&data->stats, 0xff, sizeof(data->stats)); + + data->phy_stats.src = src; + data->mac_stats.src = src; + data->ctrl_stats.src = src; + data->rmon_stats.src = src; + + if ((test_bit(ETHTOOL_STATS_PHY, req_info->stat_mask) || + test_bit(ETHTOOL_STATS_ETH_PHY, req_info->stat_mask)) && + src == ETHTOOL_MAC_STATS_SRC_AGGREGATE) { + if (phydev) + phy_ethtool_get_phy_stats(phydev, &data->phy_stats, + &data->phydev_stats); + } + + if (test_bit(ETHTOOL_STATS_ETH_PHY, req_info->stat_mask) && + dev->ethtool_ops->get_eth_phy_stats) + dev->ethtool_ops->get_eth_phy_stats(dev, &data->phy_stats); + if (test_bit(ETHTOOL_STATS_ETH_MAC, req_info->stat_mask) && + dev->ethtool_ops->get_eth_mac_stats) + dev->ethtool_ops->get_eth_mac_stats(dev, &data->mac_stats); + if (test_bit(ETHTOOL_STATS_ETH_CTRL, req_info->stat_mask) && + dev->ethtool_ops->get_eth_ctrl_stats) + dev->ethtool_ops->get_eth_ctrl_stats(dev, &data->ctrl_stats); + if (test_bit(ETHTOOL_STATS_RMON, req_info->stat_mask) && + dev->ethtool_ops->get_rmon_stats) + dev->ethtool_ops->get_rmon_stats(dev, &data->rmon_stats, + &data->rmon_ranges); + + ethnl_ops_complete(dev); + return 0; +} + +static int stats_reply_size(const struct ethnl_req_info *req_base, + const struct ethnl_reply_data *reply_base) +{ + const struct stats_req_info *req_info = STATS_REQINFO(req_base); + unsigned int n_grps = 0, n_stats = 0; + int len = 0; + + len += nla_total_size(sizeof(u32)); /* _STATS_SRC */ + + if (test_bit(ETHTOOL_STATS_ETH_PHY, req_info->stat_mask)) { + n_stats += sizeof(struct ethtool_eth_phy_stats) / sizeof(u64); + n_grps++; + } + if (test_bit(ETHTOOL_STATS_ETH_MAC, req_info->stat_mask)) { + n_stats += sizeof(struct ethtool_eth_mac_stats) / sizeof(u64); + n_grps++; + } + if (test_bit(ETHTOOL_STATS_ETH_CTRL, req_info->stat_mask)) { + n_stats += sizeof(struct ethtool_eth_ctrl_stats) / sizeof(u64); + n_grps++; + } + if (test_bit(ETHTOOL_STATS_RMON, req_info->stat_mask)) { + n_stats += sizeof(struct ethtool_rmon_stats) / sizeof(u64); + n_grps++; + /* Above includes the space for _A_STATS_GRP_HIST_VALs */ + + len += (nla_total_size(0) + /* _A_STATS_GRP_HIST */ + nla_total_size(4) + /* _A_STATS_GRP_HIST_BKT_LOW */ + nla_total_size(4)) * /* _A_STATS_GRP_HIST_BKT_HI */ + ETHTOOL_RMON_HIST_MAX * 2; + } + if (test_bit(ETHTOOL_STATS_PHY, req_info->stat_mask)) { + n_stats += sizeof(struct 
ethtool_phy_stats) / sizeof(u64); + n_grps++; + } + + len += n_grps * (nla_total_size(0) + /* _A_STATS_GRP */ + nla_total_size(4) + /* _A_STATS_GRP_ID */ + nla_total_size(4)); /* _A_STATS_GRP_SS_ID */ + len += n_stats * (nla_total_size(0) + /* _A_STATS_GRP_STAT */ + nla_total_size_64bit(sizeof(u64))); + + return len; +} + +static int stat_put(struct sk_buff *skb, u16 attrtype, u64 val) +{ + struct nlattr *nest; + int ret; + + if (val == ETHTOOL_STAT_NOT_SET) + return 0; + + /* We want to start stats attr types from 0, so we don't have a type + * for pad inside ETHTOOL_A_STATS_GRP_STAT. Pad things on the outside + * of ETHTOOL_A_STATS_GRP_STAT. Since we're one nest away from the + * actual attr we're 4B off - nla_need_padding_for_64bit() & co. + * can't be used. + */ +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS + if (!IS_ALIGNED((unsigned long)skb_tail_pointer(skb), 8)) + if (!nla_reserve(skb, ETHTOOL_A_STATS_GRP_PAD, 0)) + return -EMSGSIZE; +#endif + + nest = nla_nest_start(skb, ETHTOOL_A_STATS_GRP_STAT); + if (!nest) + return -EMSGSIZE; + + ret = nla_put_u64_64bit(skb, attrtype, val, -1 /* not used */); + if (ret) { + nla_nest_cancel(skb, nest); + return ret; + } + + nla_nest_end(skb, nest); + return 0; +} + +static int stats_put_phy_stats(struct sk_buff *skb, + const struct stats_reply_data *data) +{ + if (stat_put(skb, ETHTOOL_A_STATS_ETH_PHY_5_SYM_ERR, + data->phy_stats.SymbolErrorDuringCarrier)) + return -EMSGSIZE; + return 0; +} + +static int stats_put_phydev_stats(struct sk_buff *skb, + const struct stats_reply_data *data) +{ + if (stat_put(skb, ETHTOOL_A_STATS_PHY_RX_PKTS, + data->phydev_stats.rx_packets) || + stat_put(skb, ETHTOOL_A_STATS_PHY_RX_BYTES, + data->phydev_stats.rx_bytes) || + stat_put(skb, ETHTOOL_A_STATS_PHY_RX_ERRORS, + data->phydev_stats.rx_errors) || + stat_put(skb, ETHTOOL_A_STATS_PHY_TX_PKTS, + data->phydev_stats.tx_packets) || + stat_put(skb, ETHTOOL_A_STATS_PHY_TX_BYTES, + data->phydev_stats.tx_bytes) || + stat_put(skb, ETHTOOL_A_STATS_PHY_TX_ERRORS, + data->phydev_stats.tx_errors)) + return -EMSGSIZE; + return 0; +} + +static int stats_put_mac_stats(struct sk_buff *skb, + const struct stats_reply_data *data) +{ + if (stat_put(skb, ETHTOOL_A_STATS_ETH_MAC_2_TX_PKT, + data->mac_stats.FramesTransmittedOK) || + stat_put(skb, ETHTOOL_A_STATS_ETH_MAC_3_SINGLE_COL, + data->mac_stats.SingleCollisionFrames) || + stat_put(skb, ETHTOOL_A_STATS_ETH_MAC_4_MULTI_COL, + data->mac_stats.MultipleCollisionFrames) || + stat_put(skb, ETHTOOL_A_STATS_ETH_MAC_5_RX_PKT, + data->mac_stats.FramesReceivedOK) || + stat_put(skb, ETHTOOL_A_STATS_ETH_MAC_6_FCS_ERR, + data->mac_stats.FrameCheckSequenceErrors) || + stat_put(skb, ETHTOOL_A_STATS_ETH_MAC_7_ALIGN_ERR, + data->mac_stats.AlignmentErrors) || + stat_put(skb, ETHTOOL_A_STATS_ETH_MAC_8_TX_BYTES, + data->mac_stats.OctetsTransmittedOK) || + stat_put(skb, ETHTOOL_A_STATS_ETH_MAC_9_TX_DEFER, + data->mac_stats.FramesWithDeferredXmissions) || + stat_put(skb, ETHTOOL_A_STATS_ETH_MAC_10_LATE_COL, + data->mac_stats.LateCollisions) || + stat_put(skb, ETHTOOL_A_STATS_ETH_MAC_11_XS_COL, + data->mac_stats.FramesAbortedDueToXSColls) || + stat_put(skb, ETHTOOL_A_STATS_ETH_MAC_12_TX_INT_ERR, + data->mac_stats.FramesLostDueToIntMACXmitError) || + stat_put(skb, ETHTOOL_A_STATS_ETH_MAC_13_CS_ERR, + data->mac_stats.CarrierSenseErrors) || + stat_put(skb, ETHTOOL_A_STATS_ETH_MAC_14_RX_BYTES, + data->mac_stats.OctetsReceivedOK) || + stat_put(skb, ETHTOOL_A_STATS_ETH_MAC_15_RX_INT_ERR, + data->mac_stats.FramesLostDueToIntMACRcvError) || + stat_put(skb, 
ETHTOOL_A_STATS_ETH_MAC_18_TX_MCAST, + data->mac_stats.MulticastFramesXmittedOK) || + stat_put(skb, ETHTOOL_A_STATS_ETH_MAC_19_TX_BCAST, + data->mac_stats.BroadcastFramesXmittedOK) || + stat_put(skb, ETHTOOL_A_STATS_ETH_MAC_20_XS_DEFER, + data->mac_stats.FramesWithExcessiveDeferral) || + stat_put(skb, ETHTOOL_A_STATS_ETH_MAC_21_RX_MCAST, + data->mac_stats.MulticastFramesReceivedOK) || + stat_put(skb, ETHTOOL_A_STATS_ETH_MAC_22_RX_BCAST, + data->mac_stats.BroadcastFramesReceivedOK) || + stat_put(skb, ETHTOOL_A_STATS_ETH_MAC_23_IR_LEN_ERR, + data->mac_stats.InRangeLengthErrors) || + stat_put(skb, ETHTOOL_A_STATS_ETH_MAC_24_OOR_LEN, + data->mac_stats.OutOfRangeLengthField) || + stat_put(skb, ETHTOOL_A_STATS_ETH_MAC_25_TOO_LONG_ERR, + data->mac_stats.FrameTooLongErrors)) + return -EMSGSIZE; + return 0; +} + +static int stats_put_ctrl_stats(struct sk_buff *skb, + const struct stats_reply_data *data) +{ + if (stat_put(skb, ETHTOOL_A_STATS_ETH_CTRL_3_TX, + data->ctrl_stats.MACControlFramesTransmitted) || + stat_put(skb, ETHTOOL_A_STATS_ETH_CTRL_4_RX, + data->ctrl_stats.MACControlFramesReceived) || + stat_put(skb, ETHTOOL_A_STATS_ETH_CTRL_5_RX_UNSUP, + data->ctrl_stats.UnsupportedOpcodesReceived)) + return -EMSGSIZE; + return 0; +} + +static int stats_put_rmon_hist(struct sk_buff *skb, u32 attr, const u64 *hist, + const struct ethtool_rmon_hist_range *ranges) +{ + struct nlattr *nest; + int i; + + if (!ranges) + return 0; + + for (i = 0; i < ETHTOOL_RMON_HIST_MAX; i++) { + if (!ranges[i].low && !ranges[i].high) + break; + if (hist[i] == ETHTOOL_STAT_NOT_SET) + continue; + + nest = nla_nest_start(skb, attr); + if (!nest) + return -EMSGSIZE; + + if (nla_put_u32(skb, ETHTOOL_A_STATS_GRP_HIST_BKT_LOW, + ranges[i].low) || + nla_put_u32(skb, ETHTOOL_A_STATS_GRP_HIST_BKT_HI, + ranges[i].high) || + nla_put_u64_64bit(skb, ETHTOOL_A_STATS_GRP_HIST_VAL, + hist[i], ETHTOOL_A_STATS_GRP_PAD)) + goto err_cancel_hist; + + nla_nest_end(skb, nest); + } + + return 0; + +err_cancel_hist: + nla_nest_cancel(skb, nest); + return -EMSGSIZE; +} + +static int stats_put_rmon_stats(struct sk_buff *skb, + const struct stats_reply_data *data) +{ + if (stats_put_rmon_hist(skb, ETHTOOL_A_STATS_GRP_HIST_RX, + data->rmon_stats.hist, data->rmon_ranges) || + stats_put_rmon_hist(skb, ETHTOOL_A_STATS_GRP_HIST_TX, + data->rmon_stats.hist_tx, data->rmon_ranges)) + return -EMSGSIZE; + + if (stat_put(skb, ETHTOOL_A_STATS_RMON_UNDERSIZE, + data->rmon_stats.undersize_pkts) || + stat_put(skb, ETHTOOL_A_STATS_RMON_OVERSIZE, + data->rmon_stats.oversize_pkts) || + stat_put(skb, ETHTOOL_A_STATS_RMON_FRAG, + data->rmon_stats.fragments) || + stat_put(skb, ETHTOOL_A_STATS_RMON_JABBER, + data->rmon_stats.jabbers)) + return -EMSGSIZE; + + return 0; +} + +static int stats_put_stats(struct sk_buff *skb, + const struct stats_reply_data *data, + u32 id, u32 ss_id, + int (*cb)(struct sk_buff *skb, + const struct stats_reply_data *data)) +{ + struct nlattr *nest; + + nest = nla_nest_start(skb, ETHTOOL_A_STATS_GRP); + if (!nest) + return -EMSGSIZE; + + if (nla_put_u32(skb, ETHTOOL_A_STATS_GRP_ID, id) || + nla_put_u32(skb, ETHTOOL_A_STATS_GRP_SS_ID, ss_id)) + goto err_cancel; + + if (cb(skb, data)) + goto err_cancel; + + nla_nest_end(skb, nest); + return 0; + +err_cancel: + nla_nest_cancel(skb, nest); + return -EMSGSIZE; +} + +static int stats_fill_reply(struct sk_buff *skb, + const struct ethnl_req_info *req_base, + const struct ethnl_reply_data *reply_base) +{ + const struct stats_req_info *req_info = STATS_REQINFO(req_base); + const struct 
stats_reply_data *data = STATS_REPDATA(reply_base); + int ret = 0; + + if (nla_put_u32(skb, ETHTOOL_A_STATS_SRC, req_info->src)) + return -EMSGSIZE; + + if (!ret && test_bit(ETHTOOL_STATS_ETH_PHY, req_info->stat_mask)) + ret = stats_put_stats(skb, data, ETHTOOL_STATS_ETH_PHY, + ETH_SS_STATS_ETH_PHY, + stats_put_phy_stats); + if (!ret && test_bit(ETHTOOL_STATS_ETH_MAC, req_info->stat_mask)) + ret = stats_put_stats(skb, data, ETHTOOL_STATS_ETH_MAC, + ETH_SS_STATS_ETH_MAC, + stats_put_mac_stats); + if (!ret && test_bit(ETHTOOL_STATS_ETH_CTRL, req_info->stat_mask)) + ret = stats_put_stats(skb, data, ETHTOOL_STATS_ETH_CTRL, + ETH_SS_STATS_ETH_CTRL, + stats_put_ctrl_stats); + if (!ret && test_bit(ETHTOOL_STATS_RMON, req_info->stat_mask)) + ret = stats_put_stats(skb, data, ETHTOOL_STATS_RMON, + ETH_SS_STATS_RMON, stats_put_rmon_stats); + if (!ret && test_bit(ETHTOOL_STATS_PHY, req_info->stat_mask)) + ret = stats_put_stats(skb, data, ETHTOOL_STATS_PHY, + ETH_SS_STATS_PHY, stats_put_phydev_stats); + + return ret; +} + +const struct ethnl_request_ops ethnl_stats_request_ops = { + .request_cmd = ETHTOOL_MSG_STATS_GET, + .reply_cmd = ETHTOOL_MSG_STATS_GET_REPLY, + .hdr_attr = ETHTOOL_A_STATS_HEADER, + .req_info_size = sizeof(struct stats_req_info), + .reply_data_size = sizeof(struct stats_reply_data), + + .parse_request = stats_parse_request, + .prepare_data = stats_prepare_data, + .reply_size = stats_reply_size, + .fill_reply = stats_fill_reply, +}; + +static u64 ethtool_stats_sum(u64 a, u64 b) +{ + if (a == ETHTOOL_STAT_NOT_SET) + return b; + if (b == ETHTOOL_STAT_NOT_SET) + return a; + return a + b; +} + +/* Avoid modifying the aggregation procedure every time a new counter is added + * by treating the structures as an array of u64 statistics. + */ +static void ethtool_aggregate_stats(void *aggr_stats, const void *emac_stats, + const void *pmac_stats, size_t stats_size, + size_t stats_offset) +{ + size_t num_stats = stats_size / sizeof(u64); + const u64 *s1 = emac_stats + stats_offset; + const u64 *s2 = pmac_stats + stats_offset; + u64 *s = aggr_stats + stats_offset; + int i; + + for (i = 0; i < num_stats; i++) + s[i] = ethtool_stats_sum(s1[i], s2[i]); +} + +void ethtool_aggregate_mac_stats(struct net_device *dev, + struct ethtool_eth_mac_stats *mac_stats) +{ + const struct ethtool_ops *ops = dev->ethtool_ops; + struct ethtool_eth_mac_stats pmac, emac; + + memset(&emac, 0xff, sizeof(emac)); + memset(&pmac, 0xff, sizeof(pmac)); + emac.src = ETHTOOL_MAC_STATS_SRC_EMAC; + pmac.src = ETHTOOL_MAC_STATS_SRC_PMAC; + + ops->get_eth_mac_stats(dev, &emac); + ops->get_eth_mac_stats(dev, &pmac); + + ethtool_aggregate_stats(mac_stats, &emac, &pmac, + sizeof(mac_stats->stats), + offsetof(struct ethtool_eth_mac_stats, stats)); +} +EXPORT_SYMBOL(ethtool_aggregate_mac_stats); + +void ethtool_aggregate_phy_stats(struct net_device *dev, + struct ethtool_eth_phy_stats *phy_stats) +{ + const struct ethtool_ops *ops = dev->ethtool_ops; + struct ethtool_eth_phy_stats pmac, emac; + + memset(&emac, 0xff, sizeof(emac)); + memset(&pmac, 0xff, sizeof(pmac)); + emac.src = ETHTOOL_MAC_STATS_SRC_EMAC; + pmac.src = ETHTOOL_MAC_STATS_SRC_PMAC; + + ops->get_eth_phy_stats(dev, &emac); + ops->get_eth_phy_stats(dev, &pmac); + + ethtool_aggregate_stats(phy_stats, &emac, &pmac, + sizeof(phy_stats->stats), + offsetof(struct ethtool_eth_phy_stats, stats)); +} +EXPORT_SYMBOL(ethtool_aggregate_phy_stats); + +void ethtool_aggregate_ctrl_stats(struct net_device *dev, + struct ethtool_eth_ctrl_stats *ctrl_stats) +{ + const struct ethtool_ops 
*ops = dev->ethtool_ops; + struct ethtool_eth_ctrl_stats pmac, emac; + + memset(&emac, 0xff, sizeof(emac)); + memset(&pmac, 0xff, sizeof(pmac)); + emac.src = ETHTOOL_MAC_STATS_SRC_EMAC; + pmac.src = ETHTOOL_MAC_STATS_SRC_PMAC; + + ops->get_eth_ctrl_stats(dev, &emac); + ops->get_eth_ctrl_stats(dev, &pmac); + + ethtool_aggregate_stats(ctrl_stats, &emac, &pmac, + sizeof(ctrl_stats->stats), + offsetof(struct ethtool_eth_ctrl_stats, stats)); +} +EXPORT_SYMBOL(ethtool_aggregate_ctrl_stats); + +void ethtool_aggregate_pause_stats(struct net_device *dev, + struct ethtool_pause_stats *pause_stats) +{ + const struct ethtool_ops *ops = dev->ethtool_ops; + struct ethtool_pause_stats pmac, emac; + + memset(&emac, 0xff, sizeof(emac)); + memset(&pmac, 0xff, sizeof(pmac)); + emac.src = ETHTOOL_MAC_STATS_SRC_EMAC; + pmac.src = ETHTOOL_MAC_STATS_SRC_PMAC; + + ops->get_pause_stats(dev, &emac); + ops->get_pause_stats(dev, &pmac); + + ethtool_aggregate_stats(pause_stats, &emac, &pmac, + sizeof(pause_stats->stats), + offsetof(struct ethtool_pause_stats, stats)); +} +EXPORT_SYMBOL(ethtool_aggregate_pause_stats); + +void ethtool_aggregate_rmon_stats(struct net_device *dev, + struct ethtool_rmon_stats *rmon_stats) +{ + const struct ethtool_ops *ops = dev->ethtool_ops; + const struct ethtool_rmon_hist_range *dummy; + struct ethtool_rmon_stats pmac, emac; + + memset(&emac, 0xff, sizeof(emac)); + memset(&pmac, 0xff, sizeof(pmac)); + emac.src = ETHTOOL_MAC_STATS_SRC_EMAC; + pmac.src = ETHTOOL_MAC_STATS_SRC_PMAC; + + ops->get_rmon_stats(dev, &emac, &dummy); + ops->get_rmon_stats(dev, &pmac, &dummy); + + ethtool_aggregate_stats(rmon_stats, &emac, &pmac, + sizeof(rmon_stats->stats), + offsetof(struct ethtool_rmon_stats, stats)); +} +EXPORT_SYMBOL(ethtool_aggregate_rmon_stats); diff --git a/net/ethtool/strset.c b/net/ethtool/strset.c new file mode 100644 index 000000000000..f6a67109beda --- /dev/null +++ b/net/ethtool/strset.c @@ -0,0 +1,499 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include <linux/ethtool.h> +#include <linux/phy.h> +#include "netlink.h" +#include "common.h" + +struct strset_info { + bool per_dev; + bool free_strings; + unsigned int count; + const char (*strings)[ETH_GSTRING_LEN]; +}; + +static const struct strset_info info_template[] = { + [ETH_SS_TEST] = { + .per_dev = true, + }, + [ETH_SS_STATS] = { + .per_dev = true, + }, + [ETH_SS_PRIV_FLAGS] = { + .per_dev = true, + }, + [ETH_SS_FEATURES] = { + .per_dev = false, + .count = ARRAY_SIZE(netdev_features_strings), + .strings = netdev_features_strings, + }, + [ETH_SS_RSS_HASH_FUNCS] = { + .per_dev = false, + .count = ARRAY_SIZE(rss_hash_func_strings), + .strings = rss_hash_func_strings, + }, + [ETH_SS_TUNABLES] = { + .per_dev = false, + .count = ARRAY_SIZE(tunable_strings), + .strings = tunable_strings, + }, + [ETH_SS_PHY_STATS] = { + .per_dev = true, + }, + [ETH_SS_PHY_TUNABLES] = { + .per_dev = false, + .count = ARRAY_SIZE(phy_tunable_strings), + .strings = phy_tunable_strings, + }, + [ETH_SS_LINK_MODES] = { + .per_dev = false, + .count = __ETHTOOL_LINK_MODE_MASK_NBITS, + .strings = link_mode_names, + }, + [ETH_SS_MSG_CLASSES] = { + .per_dev = false, + .count = NETIF_MSG_CLASS_COUNT, + .strings = netif_msg_class_names, + }, + [ETH_SS_WOL_MODES] = { + .per_dev = false, + .count = WOL_MODE_COUNT, + .strings = wol_mode_names, + }, + [ETH_SS_SOF_TIMESTAMPING] = { + .per_dev = false, + .count = __SOF_TIMESTAMPING_CNT, + .strings = sof_timestamping_names, + }, + [ETH_SS_TS_TX_TYPES] = { + .per_dev = false, + .count = __HWTSTAMP_TX_CNT, + .strings = 
ts_tx_type_names, + }, + [ETH_SS_TS_RX_FILTERS] = { + .per_dev = false, + .count = __HWTSTAMP_FILTER_CNT, + .strings = ts_rx_filter_names, + }, + [ETH_SS_TS_FLAGS] = { + .per_dev = false, + .count = __HWTSTAMP_FLAG_CNT, + .strings = ts_flags_names, + }, + [ETH_SS_UDP_TUNNEL_TYPES] = { + .per_dev = false, + .count = __ETHTOOL_UDP_TUNNEL_TYPE_CNT, + .strings = udp_tunnel_type_names, + }, + [ETH_SS_STATS_STD] = { + .per_dev = false, + .count = __ETHTOOL_STATS_CNT, + .strings = stats_std_names, + }, + [ETH_SS_STATS_ETH_PHY] = { + .per_dev = false, + .count = __ETHTOOL_A_STATS_ETH_PHY_CNT, + .strings = stats_eth_phy_names, + }, + [ETH_SS_STATS_ETH_MAC] = { + .per_dev = false, + .count = __ETHTOOL_A_STATS_ETH_MAC_CNT, + .strings = stats_eth_mac_names, + }, + [ETH_SS_STATS_ETH_CTRL] = { + .per_dev = false, + .count = __ETHTOOL_A_STATS_ETH_CTRL_CNT, + .strings = stats_eth_ctrl_names, + }, + [ETH_SS_STATS_RMON] = { + .per_dev = false, + .count = __ETHTOOL_A_STATS_RMON_CNT, + .strings = stats_rmon_names, + }, + [ETH_SS_STATS_PHY] = { + .per_dev = false, + .count = __ETHTOOL_A_STATS_PHY_CNT, + .strings = stats_phy_names, + }, +}; + +struct strset_req_info { + struct ethnl_req_info base; + u32 req_ids; + bool counts_only; +}; + +#define STRSET_REQINFO(__req_base) \ + container_of(__req_base, struct strset_req_info, base) + +struct strset_reply_data { + struct ethnl_reply_data base; + struct strset_info sets[ETH_SS_COUNT]; +}; + +#define STRSET_REPDATA(__reply_base) \ + container_of(__reply_base, struct strset_reply_data, base) + +const struct nla_policy ethnl_strset_get_policy[] = { + [ETHTOOL_A_STRSET_HEADER] = + NLA_POLICY_NESTED(ethnl_header_policy_phy), + [ETHTOOL_A_STRSET_STRINGSETS] = { .type = NLA_NESTED }, + [ETHTOOL_A_STRSET_COUNTS_ONLY] = { .type = NLA_FLAG }, +}; + +static const struct nla_policy get_stringset_policy[] = { + [ETHTOOL_A_STRINGSET_ID] = { .type = NLA_U32 }, +}; + +/** + * strset_include() - test if a string set should be included in reply + * @info: parsed client request + * @data: pointer to request data structure + * @id: id of string set to check (ETH_SS_* constants) + */ +static bool strset_include(const struct strset_req_info *info, + const struct strset_reply_data *data, u32 id) +{ + bool per_dev; + + BUILD_BUG_ON(ETH_SS_COUNT >= BITS_PER_BYTE * sizeof(info->req_ids)); + + if (info->req_ids) + return info->req_ids & (1U << id); + per_dev = data->sets[id].per_dev; + if (!per_dev && !data->sets[id].strings) + return false; + + return data->base.dev ? 
per_dev : !per_dev; +} + +static int strset_get_id(const struct nlattr *nest, u32 *val, + struct netlink_ext_ack *extack) +{ + struct nlattr *tb[ARRAY_SIZE(get_stringset_policy)]; + int ret; + + ret = nla_parse_nested(tb, ARRAY_SIZE(get_stringset_policy) - 1, nest, + get_stringset_policy, extack); + if (ret < 0) + return ret; + if (NL_REQ_ATTR_CHECK(extack, nest, tb, ETHTOOL_A_STRINGSET_ID)) + return -EINVAL; + + *val = nla_get_u32(tb[ETHTOOL_A_STRINGSET_ID]); + return 0; +} + +static const struct nla_policy strset_stringsets_policy[] = { + [ETHTOOL_A_STRINGSETS_STRINGSET] = { .type = NLA_NESTED }, +}; + +static int strset_parse_request(struct ethnl_req_info *req_base, + struct nlattr **tb, + struct netlink_ext_ack *extack) +{ + struct strset_req_info *req_info = STRSET_REQINFO(req_base); + struct nlattr *nest = tb[ETHTOOL_A_STRSET_STRINGSETS]; + struct nlattr *attr; + int rem, ret; + + if (!nest) + return 0; + ret = nla_validate_nested(nest, + ARRAY_SIZE(strset_stringsets_policy) - 1, + strset_stringsets_policy, extack); + if (ret < 0) + return ret; + + req_info->counts_only = tb[ETHTOOL_A_STRSET_COUNTS_ONLY]; + nla_for_each_nested(attr, nest, rem) { + u32 id; + + if (WARN_ONCE(nla_type(attr) != ETHTOOL_A_STRINGSETS_STRINGSET, + "unexpected attrtype %u in ETHTOOL_A_STRSET_STRINGSETS\n", + nla_type(attr))) + return -EINVAL; + + ret = strset_get_id(attr, &id, extack); + if (ret < 0) + return ret; + if (id >= ETH_SS_COUNT) { + NL_SET_ERR_MSG_ATTR(extack, attr, + "unknown string set id"); + return -EOPNOTSUPP; + } + + req_info->req_ids |= (1U << id); + } + + return 0; +} + +static void strset_cleanup_data(struct ethnl_reply_data *reply_base) +{ + struct strset_reply_data *data = STRSET_REPDATA(reply_base); + unsigned int i; + + for (i = 0; i < ETH_SS_COUNT; i++) + if (data->sets[i].free_strings) { + kfree(data->sets[i].strings); + data->sets[i].strings = NULL; + data->sets[i].free_strings = false; + } +} + +static int strset_prepare_set(struct strset_info *info, struct net_device *dev, + struct phy_device *phydev, unsigned int id, + bool counts_only) +{ + const struct ethtool_phy_ops *phy_ops = ethtool_phy_ops; + const struct ethtool_ops *ops = dev->ethtool_ops; + void *strings; + int count, ret; + + if (id == ETH_SS_PHY_STATS && phydev && + !ops->get_ethtool_phy_stats && phy_ops && + phy_ops->get_sset_count) + ret = phy_ops->get_sset_count(phydev); + else if (ops->get_sset_count && ops->get_strings) + ret = ops->get_sset_count(dev, id); + else + ret = -EOPNOTSUPP; + if (ret <= 0) { + info->count = 0; + return 0; + } + + count = ret; + if (!counts_only) { + strings = kcalloc(count, ETH_GSTRING_LEN, GFP_KERNEL); + if (!strings) + return -ENOMEM; + if (id == ETH_SS_PHY_STATS && phydev && + !ops->get_ethtool_phy_stats && phy_ops && + phy_ops->get_strings) + phy_ops->get_strings(phydev, strings); + else + ops->get_strings(dev, id, strings); + info->strings = strings; + info->free_strings = true; + } + info->count = count; + + return 0; +} + +static int strset_prepare_data(const struct ethnl_req_info *req_base, + struct ethnl_reply_data *reply_base, + const struct genl_info *info) +{ + const struct strset_req_info *req_info = STRSET_REQINFO(req_base); + struct strset_reply_data *data = STRSET_REPDATA(reply_base); + struct net_device *dev = reply_base->dev; + struct nlattr **tb = info->attrs; + struct phy_device *phydev; + unsigned int i; + int ret; + + BUILD_BUG_ON(ARRAY_SIZE(info_template) != ETH_SS_COUNT); + memcpy(&data->sets, &info_template, sizeof(data->sets)); + + if (!dev) { + for (i = 0; i 
< ETH_SS_COUNT; i++) { + if ((req_info->req_ids & (1U << i)) && + data->sets[i].per_dev) { + GENL_SET_ERR_MSG(info, "requested per device strings without dev"); + return -EINVAL; + } + } + return 0; + } + + phydev = ethnl_req_get_phydev(req_base, tb, ETHTOOL_A_HEADER_FLAGS, + info->extack); + + /* phydev can be NULL, check for errors only */ + if (IS_ERR(phydev)) + return PTR_ERR(phydev); + + ret = ethnl_ops_begin(dev); + if (ret < 0) + goto err_strset; + for (i = 0; i < ETH_SS_COUNT; i++) { + if (!strset_include(req_info, data, i) || + !data->sets[i].per_dev) + continue; + + ret = strset_prepare_set(&data->sets[i], dev, phydev, i, + req_info->counts_only); + if (ret < 0) + goto err_ops; + } + ethnl_ops_complete(dev); + + return 0; +err_ops: + ethnl_ops_complete(dev); +err_strset: + strset_cleanup_data(reply_base); + return ret; +} + +/* calculate size of ETHTOOL_A_STRSET_STRINGSET nest for one string set */ +static int strset_set_size(const struct strset_info *info, bool counts_only) +{ + unsigned int len = 0; + unsigned int i; + + if (info->count == 0) + return 0; + if (counts_only) + return nla_total_size(2 * nla_total_size(sizeof(u32))); + + for (i = 0; i < info->count; i++) { + const char *str = info->strings[i]; + + /* ETHTOOL_A_STRING_INDEX, ETHTOOL_A_STRING_VALUE, nest */ + len += nla_total_size(nla_total_size(sizeof(u32)) + + ethnl_strz_size(str)); + } + /* ETHTOOL_A_STRINGSET_ID, ETHTOOL_A_STRINGSET_COUNT */ + len = 2 * nla_total_size(sizeof(u32)) + nla_total_size(len); + + return nla_total_size(len); +} + +static int strset_reply_size(const struct ethnl_req_info *req_base, + const struct ethnl_reply_data *reply_base) +{ + const struct strset_req_info *req_info = STRSET_REQINFO(req_base); + const struct strset_reply_data *data = STRSET_REPDATA(reply_base); + unsigned int i; + int len = 0; + int ret; + + len += nla_total_size(0); /* ETHTOOL_A_STRSET_STRINGSETS */ + + for (i = 0; i < ETH_SS_COUNT; i++) { + const struct strset_info *set_info = &data->sets[i]; + + if (!strset_include(req_info, data, i)) + continue; + + ret = strset_set_size(set_info, req_info->counts_only); + if (ret < 0) + return ret; + len += ret; + } + + return len; +} + +/* fill one string into reply */ +static int strset_fill_string(struct sk_buff *skb, + const struct strset_info *set_info, u32 idx) +{ + struct nlattr *string_attr; + const char *value; + + value = set_info->strings[idx]; + + string_attr = nla_nest_start(skb, ETHTOOL_A_STRINGS_STRING); + if (!string_attr) + return -EMSGSIZE; + if (nla_put_u32(skb, ETHTOOL_A_STRING_INDEX, idx) || + ethnl_put_strz(skb, ETHTOOL_A_STRING_VALUE, value)) + goto nla_put_failure; + nla_nest_end(skb, string_attr); + + return 0; +nla_put_failure: + nla_nest_cancel(skb, string_attr); + return -EMSGSIZE; +} + +/* fill one string set into reply */ +static int strset_fill_set(struct sk_buff *skb, + const struct strset_info *set_info, u32 id, + bool counts_only) +{ + struct nlattr *stringset_attr; + struct nlattr *strings_attr; + unsigned int i; + + if (!set_info->per_dev && !set_info->strings) + return -EOPNOTSUPP; + if (set_info->count == 0) + return 0; + stringset_attr = nla_nest_start(skb, ETHTOOL_A_STRINGSETS_STRINGSET); + if (!stringset_attr) + return -EMSGSIZE; + + if (nla_put_u32(skb, ETHTOOL_A_STRINGSET_ID, id) || + nla_put_u32(skb, ETHTOOL_A_STRINGSET_COUNT, set_info->count)) + goto nla_put_failure; + + if (!counts_only) { + strings_attr = nla_nest_start(skb, ETHTOOL_A_STRINGSET_STRINGS); + if (!strings_attr) + goto nla_put_failure; + for (i = 0; i < set_info->count; 
i++) { + if (strset_fill_string(skb, set_info, i) < 0) + goto nla_put_failure; + } + nla_nest_end(skb, strings_attr); + } + + nla_nest_end(skb, stringset_attr); + return 0; + +nla_put_failure: + nla_nest_cancel(skb, stringset_attr); + return -EMSGSIZE; +} + +static int strset_fill_reply(struct sk_buff *skb, + const struct ethnl_req_info *req_base, + const struct ethnl_reply_data *reply_base) +{ + const struct strset_req_info *req_info = STRSET_REQINFO(req_base); + const struct strset_reply_data *data = STRSET_REPDATA(reply_base); + struct nlattr *nest; + unsigned int i; + int ret; + + nest = nla_nest_start(skb, ETHTOOL_A_STRSET_STRINGSETS); + if (!nest) + return -EMSGSIZE; + + for (i = 0; i < ETH_SS_COUNT; i++) { + if (strset_include(req_info, data, i)) { + ret = strset_fill_set(skb, &data->sets[i], i, + req_info->counts_only); + if (ret < 0) + goto nla_put_failure; + } + } + + nla_nest_end(skb, nest); + return 0; + +nla_put_failure: + nla_nest_cancel(skb, nest); + return ret; +} + +const struct ethnl_request_ops ethnl_strset_request_ops = { + .request_cmd = ETHTOOL_MSG_STRSET_GET, + .reply_cmd = ETHTOOL_MSG_STRSET_GET_REPLY, + .hdr_attr = ETHTOOL_A_STRSET_HEADER, + .req_info_size = sizeof(struct strset_req_info), + .reply_data_size = sizeof(struct strset_reply_data), + .allow_nodev_do = true, + + .parse_request = strset_parse_request, + .prepare_data = strset_prepare_data, + .reply_size = strset_reply_size, + .fill_reply = strset_fill_reply, + .cleanup_data = strset_cleanup_data, +}; diff --git a/net/ethtool/ts.h b/net/ethtool/ts.h new file mode 100644 index 000000000000..d901a879a671 --- /dev/null +++ b/net/ethtool/ts.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef _NET_ETHTOOL_TS_H +#define _NET_ETHTOOL_TS_H + +#include "netlink.h" + +static const struct nla_policy +ethnl_ts_hwtst_prov_policy[ETHTOOL_A_TS_HWTSTAMP_PROVIDER_MAX + 1] = { + [ETHTOOL_A_TS_HWTSTAMP_PROVIDER_INDEX] = { .type = NLA_U32 }, + [ETHTOOL_A_TS_HWTSTAMP_PROVIDER_QUALIFIER] = + NLA_POLICY_MAX(NLA_U32, HWTSTAMP_PROVIDER_QUALIFIER_CNT - 1) +}; + +int ts_parse_hwtst_provider(const struct nlattr *nest, + struct hwtstamp_provider_desc *hwprov_desc, + struct netlink_ext_ack *extack, + bool *mod); + +#endif /* _NET_ETHTOOL_TS_H */ diff --git a/net/ethtool/tsconfig.c b/net/ethtool/tsconfig.c new file mode 100644 index 000000000000..169b413b31fc --- /dev/null +++ b/net/ethtool/tsconfig.c @@ -0,0 +1,455 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include <linux/net_tstamp.h> +#include <linux/ptp_clock_kernel.h> + +#include "netlink.h" +#include "common.h" +#include "bitset.h" +#include "../core/dev.h" +#include "ts.h" + +struct tsconfig_req_info { + struct ethnl_req_info base; +}; + +struct tsconfig_reply_data { + struct ethnl_reply_data base; + struct hwtstamp_provider_desc hwprov_desc; + struct { + u32 tx_type; + u32 rx_filter; + u32 flags; + } hwtst_config; +}; + +#define TSCONFIG_REPDATA(__reply_base) \ + container_of(__reply_base, struct tsconfig_reply_data, base) + +const struct nla_policy ethnl_tsconfig_get_policy[ETHTOOL_A_TSCONFIG_HEADER + 1] = { + [ETHTOOL_A_TSCONFIG_HEADER] = + NLA_POLICY_NESTED(ethnl_header_policy), +}; + +static int tsconfig_prepare_data(const struct ethnl_req_info *req_base, + struct ethnl_reply_data *reply_base, + const struct genl_info *info) +{ + struct tsconfig_reply_data *data = TSCONFIG_REPDATA(reply_base); + struct hwtstamp_provider *hwprov = NULL; + struct net_device *dev = reply_base->dev; + struct kernel_hwtstamp_config cfg = {}; + int ret; + + if 
(!dev->netdev_ops->ndo_hwtstamp_get) + return -EOPNOTSUPP; + + ret = ethnl_ops_begin(dev); + if (ret < 0) + return ret; + + ret = dev_get_hwtstamp_phylib(dev, &cfg); + if (ret) + goto out; + + data->hwtst_config.tx_type = BIT(cfg.tx_type); + data->hwtst_config.rx_filter = BIT(cfg.rx_filter); + data->hwtst_config.flags = cfg.flags; + + data->hwprov_desc.index = -1; + hwprov = rtnl_dereference(dev->hwprov); + if (hwprov) { + data->hwprov_desc.index = hwprov->desc.index; + data->hwprov_desc.qualifier = hwprov->desc.qualifier; + } else { + struct kernel_ethtool_ts_info ts_info = {}; + + ts_info.phc_index = -1; + ret = __ethtool_get_ts_info(dev, &ts_info); + if (ret) + goto out; + + if (ts_info.phc_index == -1) + return -ENODEV; + + data->hwprov_desc.index = ts_info.phc_index; + data->hwprov_desc.qualifier = ts_info.phc_qualifier; + } + +out: + ethnl_ops_complete(dev); + return ret; +} + +static int tsconfig_reply_size(const struct ethnl_req_info *req_base, + const struct ethnl_reply_data *reply_base) +{ + const struct tsconfig_reply_data *data = TSCONFIG_REPDATA(reply_base); + bool compact = req_base->flags & ETHTOOL_FLAG_COMPACT_BITSETS; + int len = 0; + int ret; + + BUILD_BUG_ON(__HWTSTAMP_TX_CNT > 32); + BUILD_BUG_ON(__HWTSTAMP_FILTER_CNT > 32); + BUILD_BUG_ON(__HWTSTAMP_FLAG_CNT > 32); + + if (data->hwtst_config.flags) { + ret = ethnl_bitset32_size(&data->hwtst_config.flags, + NULL, __HWTSTAMP_FLAG_CNT, + ts_flags_names, compact); + if (ret < 0) + return ret; + len += ret; /* _TSCONFIG_HWTSTAMP_FLAGS */ + } + + if (data->hwtst_config.tx_type) { + ret = ethnl_bitset32_size(&data->hwtst_config.tx_type, + NULL, __HWTSTAMP_TX_CNT, + ts_tx_type_names, compact); + if (ret < 0) + return ret; + len += ret; /* _TSCONFIG_TX_TYPES */ + } + if (data->hwtst_config.rx_filter) { + ret = ethnl_bitset32_size(&data->hwtst_config.rx_filter, + NULL, __HWTSTAMP_FILTER_CNT, + ts_rx_filter_names, compact); + if (ret < 0) + return ret; + len += ret; /* _TSCONFIG_RX_FILTERS */ + } + + if (data->hwprov_desc.index >= 0) + /* _TSCONFIG_HWTSTAMP_PROVIDER */ + len += nla_total_size(0) + + 2 * nla_total_size(sizeof(u32)); + + return len; +} + +static int tsconfig_fill_reply(struct sk_buff *skb, + const struct ethnl_req_info *req_base, + const struct ethnl_reply_data *reply_base) +{ + const struct tsconfig_reply_data *data = TSCONFIG_REPDATA(reply_base); + bool compact = req_base->flags & ETHTOOL_FLAG_COMPACT_BITSETS; + int ret; + + if (data->hwtst_config.flags) { + ret = ethnl_put_bitset32(skb, ETHTOOL_A_TSCONFIG_HWTSTAMP_FLAGS, + &data->hwtst_config.flags, NULL, + __HWTSTAMP_FLAG_CNT, + ts_flags_names, compact); + if (ret < 0) + return ret; + } + + if (data->hwtst_config.tx_type) { + ret = ethnl_put_bitset32(skb, ETHTOOL_A_TSCONFIG_TX_TYPES, + &data->hwtst_config.tx_type, NULL, + __HWTSTAMP_TX_CNT, + ts_tx_type_names, compact); + if (ret < 0) + return ret; + } + + if (data->hwtst_config.rx_filter) { + ret = ethnl_put_bitset32(skb, ETHTOOL_A_TSCONFIG_RX_FILTERS, + &data->hwtst_config.rx_filter, + NULL, __HWTSTAMP_FILTER_CNT, + ts_rx_filter_names, compact); + if (ret < 0) + return ret; + } + + if (data->hwprov_desc.index >= 0) { + struct nlattr *nest; + + nest = nla_nest_start(skb, ETHTOOL_A_TSCONFIG_HWTSTAMP_PROVIDER); + if (!nest) + return -EMSGSIZE; + + if (nla_put_u32(skb, ETHTOOL_A_TS_HWTSTAMP_PROVIDER_INDEX, + data->hwprov_desc.index) || + nla_put_u32(skb, + ETHTOOL_A_TS_HWTSTAMP_PROVIDER_QUALIFIER, + data->hwprov_desc.qualifier)) { + nla_nest_cancel(skb, nest); + return -EMSGSIZE; + } + + nla_nest_end(skb, nest); 
+ } + return 0; +} + +/* TSCONFIG_SET */ +const struct nla_policy ethnl_tsconfig_set_policy[ETHTOOL_A_TSCONFIG_MAX + 1] = { + [ETHTOOL_A_TSCONFIG_HEADER] = NLA_POLICY_NESTED(ethnl_header_policy), + [ETHTOOL_A_TSCONFIG_HWTSTAMP_PROVIDER] = + NLA_POLICY_NESTED(ethnl_ts_hwtst_prov_policy), + [ETHTOOL_A_TSCONFIG_HWTSTAMP_FLAGS] = { .type = NLA_NESTED }, + [ETHTOOL_A_TSCONFIG_RX_FILTERS] = { .type = NLA_NESTED }, + [ETHTOOL_A_TSCONFIG_TX_TYPES] = { .type = NLA_NESTED }, +}; + +static int tsconfig_send_reply(struct net_device *dev, struct genl_info *info) +{ + struct tsconfig_reply_data *reply_data; + struct tsconfig_req_info *req_info; + struct sk_buff *rskb; + void *reply_payload; + int reply_len = 0; + int ret; + + req_info = kzalloc(sizeof(*req_info), GFP_KERNEL); + if (!req_info) + return -ENOMEM; + reply_data = kmalloc(sizeof(*reply_data), GFP_KERNEL); + if (!reply_data) { + kfree(req_info); + return -ENOMEM; + } + + ASSERT_RTNL(); + reply_data->base.dev = dev; + ret = tsconfig_prepare_data(&req_info->base, &reply_data->base, info); + if (ret < 0) + goto err_cleanup; + + ret = tsconfig_reply_size(&req_info->base, &reply_data->base); + if (ret < 0) + goto err_cleanup; + + reply_len = ret + ethnl_reply_header_size(); + rskb = ethnl_reply_init(reply_len, dev, ETHTOOL_MSG_TSCONFIG_SET_REPLY, + ETHTOOL_A_TSCONFIG_HEADER, info, &reply_payload); + if (!rskb) + goto err_cleanup; + + ret = tsconfig_fill_reply(rskb, &req_info->base, &reply_data->base); + if (ret < 0) + goto err_cleanup; + + genlmsg_end(rskb, reply_payload); + ret = genlmsg_reply(rskb, info); + +err_cleanup: + kfree(reply_data); + kfree(req_info); + return ret; +} + +static int ethnl_set_tsconfig_validate(struct ethnl_req_info *req_base, + struct genl_info *info) +{ + const struct net_device_ops *ops = req_base->dev->netdev_ops; + + if (!ops->ndo_hwtstamp_set || !ops->ndo_hwtstamp_get) + return -EOPNOTSUPP; + + return 1; +} + +static struct hwtstamp_provider * +tsconfig_set_hwprov_from_desc(struct net_device *dev, + struct genl_info *info, + struct hwtstamp_provider_desc *hwprov_desc) +{ + struct kernel_ethtool_ts_info ts_info; + struct hwtstamp_provider *hwprov; + struct nlattr **tb = info->attrs; + struct phy_device *phy = NULL; + enum hwtstamp_source source; + int ret; + + ret = ethtool_net_get_ts_info_by_phc(dev, &ts_info, hwprov_desc); + if (!ret) { + /* Found */ + source = HWTSTAMP_SOURCE_NETDEV; + } else { + phy = ethtool_phy_get_ts_info_by_phc(dev, &ts_info, hwprov_desc); + if (IS_ERR(phy)) { + if (PTR_ERR(phy) == -ENODEV) + NL_SET_ERR_MSG_ATTR(info->extack, + tb[ETHTOOL_A_TSCONFIG_HWTSTAMP_PROVIDER], + "phc not in this net device topology"); + return ERR_CAST(phy); + } + + source = HWTSTAMP_SOURCE_PHYLIB; + } + + hwprov = kzalloc(sizeof(*hwprov), GFP_KERNEL); + if (!hwprov) + return ERR_PTR(-ENOMEM); + + hwprov->desc.index = hwprov_desc->index; + hwprov->desc.qualifier = hwprov_desc->qualifier; + hwprov->source = source; + hwprov->phydev = phy; + + return hwprov; +} + +static int ethnl_set_tsconfig(struct ethnl_req_info *req_base, + struct genl_info *info) +{ + struct kernel_hwtstamp_config hwtst_config = {0}; + bool hwprov_mod = false, config_mod = false; + struct hwtstamp_provider *hwprov = NULL; + struct net_device *dev = req_base->dev; + struct nlattr **tb = info->attrs; + int ret; + + BUILD_BUG_ON(__HWTSTAMP_TX_CNT >= 32); + BUILD_BUG_ON(__HWTSTAMP_FILTER_CNT >= 32); + BUILD_BUG_ON(__HWTSTAMP_FLAG_CNT > 32); + + if (!netif_device_present(dev)) + return -ENODEV; + + if (tb[ETHTOOL_A_TSCONFIG_HWTSTAMP_PROVIDER]) { + 
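+		/* Seed the descriptor with the currently selected provider so
+		 * ts_parse_hwtst_provider() only flags hwprov_mod when the
+		 * request actually changes it.
+		 */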
struct hwtstamp_provider_desc __hwprov_desc = {.index = -1}; + struct hwtstamp_provider *__hwprov; + + __hwprov = rtnl_dereference(dev->hwprov); + if (__hwprov) { + __hwprov_desc.index = __hwprov->desc.index; + __hwprov_desc.qualifier = __hwprov->desc.qualifier; + } + + ret = ts_parse_hwtst_provider(tb[ETHTOOL_A_TSCONFIG_HWTSTAMP_PROVIDER], + &__hwprov_desc, info->extack, + &hwprov_mod); + if (ret < 0) + return ret; + + if (hwprov_mod) { + hwprov = tsconfig_set_hwprov_from_desc(dev, info, + &__hwprov_desc); + if (IS_ERR(hwprov)) + return PTR_ERR(hwprov); + } + } + + /* Get current hwtstamp config if we are not changing the + * hwtstamp source. It will be zeroed in the other case. + */ + if (!hwprov_mod) { + ret = dev_get_hwtstamp_phylib(dev, &hwtst_config); + if (ret < 0 && ret != -EOPNOTSUPP) + goto err_free_hwprov; + } + + /* Get the hwtstamp config from netlink */ + if (tb[ETHTOOL_A_TSCONFIG_TX_TYPES]) { + u32 req_tx_type; + + req_tx_type = BIT(hwtst_config.tx_type); + ret = ethnl_update_bitset32(&req_tx_type, + __HWTSTAMP_TX_CNT, + tb[ETHTOOL_A_TSCONFIG_TX_TYPES], + ts_tx_type_names, info->extack, + &config_mod); + if (ret < 0) + goto err_free_hwprov; + + /* Select only one tx type at a time */ + if (ffs(req_tx_type) != fls(req_tx_type)) { + ret = -EINVAL; + goto err_free_hwprov; + } + + hwtst_config.tx_type = ffs(req_tx_type) - 1; + } + + if (tb[ETHTOOL_A_TSCONFIG_RX_FILTERS]) { + u32 req_rx_filter; + + req_rx_filter = BIT(hwtst_config.rx_filter); + ret = ethnl_update_bitset32(&req_rx_filter, + __HWTSTAMP_FILTER_CNT, + tb[ETHTOOL_A_TSCONFIG_RX_FILTERS], + ts_rx_filter_names, info->extack, + &config_mod); + if (ret < 0) + goto err_free_hwprov; + + /* Select only one rx filter at a time */ + if (ffs(req_rx_filter) != fls(req_rx_filter)) { + ret = -EINVAL; + goto err_free_hwprov; + } + + hwtst_config.rx_filter = ffs(req_rx_filter) - 1; + } + + if (tb[ETHTOOL_A_TSCONFIG_HWTSTAMP_FLAGS]) { + ret = ethnl_update_bitset32(&hwtst_config.flags, + __HWTSTAMP_FLAG_CNT, + tb[ETHTOOL_A_TSCONFIG_HWTSTAMP_FLAGS], + ts_flags_names, info->extack, + &config_mod); + if (ret < 0) + goto err_free_hwprov; + } + + ret = net_hwtstamp_validate(&hwtst_config); + if (ret) + goto err_free_hwprov; + + if (hwprov_mod) { + struct kernel_hwtstamp_config zero_config = {0}; + struct hwtstamp_provider *__hwprov; + + /* Disable current time stamping if we try to enable + * another one + */ + ret = dev_set_hwtstamp_phylib(dev, &zero_config, info->extack); + if (ret < 0) + goto err_free_hwprov; + + /* Change the selected hwtstamp source */ + __hwprov = rcu_replace_pointer_rtnl(dev->hwprov, hwprov); + if (__hwprov) + kfree_rcu(__hwprov, rcu_head); + } + + if (config_mod) { + ret = dev_set_hwtstamp_phylib(dev, &hwtst_config, + info->extack); + if (ret < 0) + return ret; + } + + ret = tsconfig_send_reply(dev, info); + if (ret && ret != -EOPNOTSUPP) { + NL_SET_ERR_MSG(info->extack, + "error while reading the new configuration set"); + return ret; + } + + /* tsconfig has no notification */ + return 0; + +err_free_hwprov: + kfree(hwprov); + + return ret; +} + +const struct ethnl_request_ops ethnl_tsconfig_request_ops = { + .request_cmd = ETHTOOL_MSG_TSCONFIG_GET, + .reply_cmd = ETHTOOL_MSG_TSCONFIG_GET_REPLY, + .hdr_attr = ETHTOOL_A_TSCONFIG_HEADER, + .req_info_size = sizeof(struct tsconfig_req_info), + .reply_data_size = sizeof(struct tsconfig_reply_data), + + .prepare_data = tsconfig_prepare_data, + .reply_size = tsconfig_reply_size, + .fill_reply = tsconfig_fill_reply, + + .set_validate = ethnl_set_tsconfig_validate, + .set = 
ethnl_set_tsconfig, +}; diff --git a/net/ethtool/tsinfo.c b/net/ethtool/tsinfo.c new file mode 100644 index 000000000000..8c654caa6805 --- /dev/null +++ b/net/ethtool/tsinfo.c @@ -0,0 +1,564 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include <linux/net_tstamp.h> +#include <linux/phy.h> +#include <linux/phy_link_topology.h> +#include <linux/ptp_clock_kernel.h> +#include <net/netdev_lock.h> + +#include "netlink.h" +#include "common.h" +#include "bitset.h" +#include "ts.h" + +struct tsinfo_req_info { + struct ethnl_req_info base; + struct hwtstamp_provider_desc hwprov_desc; +}; + +struct tsinfo_reply_data { + struct ethnl_reply_data base; + struct kernel_ethtool_ts_info ts_info; + struct ethtool_ts_stats stats; +}; + +#define TSINFO_REQINFO(__req_base) \ + container_of(__req_base, struct tsinfo_req_info, base) + +#define TSINFO_REPDATA(__reply_base) \ + container_of(__reply_base, struct tsinfo_reply_data, base) + +#define ETHTOOL_TS_STAT_CNT \ + (__ETHTOOL_A_TS_STAT_CNT - (ETHTOOL_A_TS_STAT_UNSPEC + 1)) + +const struct nla_policy ethnl_tsinfo_get_policy[ETHTOOL_A_TSINFO_MAX + 1] = { + [ETHTOOL_A_TSINFO_HEADER] = + NLA_POLICY_NESTED(ethnl_header_policy_stats), + [ETHTOOL_A_TSINFO_HWTSTAMP_PROVIDER] = + NLA_POLICY_NESTED(ethnl_ts_hwtst_prov_policy), +}; + +int ts_parse_hwtst_provider(const struct nlattr *nest, + struct hwtstamp_provider_desc *hwprov_desc, + struct netlink_ext_ack *extack, + bool *mod) +{ + struct nlattr *tb[ARRAY_SIZE(ethnl_ts_hwtst_prov_policy)]; + int ret; + + ret = nla_parse_nested(tb, + ARRAY_SIZE(ethnl_ts_hwtst_prov_policy) - 1, + nest, + ethnl_ts_hwtst_prov_policy, extack); + if (ret < 0) + return ret; + + if (NL_REQ_ATTR_CHECK(extack, nest, tb, + ETHTOOL_A_TS_HWTSTAMP_PROVIDER_INDEX) || + NL_REQ_ATTR_CHECK(extack, nest, tb, + ETHTOOL_A_TS_HWTSTAMP_PROVIDER_QUALIFIER)) + return -EINVAL; + + ethnl_update_u32(&hwprov_desc->index, + tb[ETHTOOL_A_TS_HWTSTAMP_PROVIDER_INDEX], + mod); + ethnl_update_u32(&hwprov_desc->qualifier, + tb[ETHTOOL_A_TS_HWTSTAMP_PROVIDER_QUALIFIER], + mod); + + return 0; +} + +static int +tsinfo_parse_request(struct ethnl_req_info *req_base, struct nlattr **tb, + struct netlink_ext_ack *extack) +{ + struct tsinfo_req_info *req = TSINFO_REQINFO(req_base); + bool mod = false; + + req->hwprov_desc.index = -1; + + if (!tb[ETHTOOL_A_TSINFO_HWTSTAMP_PROVIDER]) + return 0; + + return ts_parse_hwtst_provider(tb[ETHTOOL_A_TSINFO_HWTSTAMP_PROVIDER], + &req->hwprov_desc, extack, &mod); +} + +static int tsinfo_prepare_data(const struct ethnl_req_info *req_base, + struct ethnl_reply_data *reply_base, + const struct genl_info *info) +{ + struct tsinfo_reply_data *data = TSINFO_REPDATA(reply_base); + struct tsinfo_req_info *req = TSINFO_REQINFO(req_base); + struct net_device *dev = reply_base->dev; + int ret; + + ret = ethnl_ops_begin(dev); + if (ret < 0) + return ret; + + if (req->hwprov_desc.index != -1) { + ret = ethtool_get_ts_info_by_phc(dev, &data->ts_info, + &req->hwprov_desc); + ethnl_ops_complete(dev); + return ret; + } + + if (req_base->flags & ETHTOOL_FLAG_STATS) { + ethtool_stats_init((u64 *)&data->stats, + sizeof(data->stats) / sizeof(u64)); + if (dev->ethtool_ops->get_ts_stats) + dev->ethtool_ops->get_ts_stats(dev, &data->stats); + } + + ret = __ethtool_get_ts_info(dev, &data->ts_info); + ethnl_ops_complete(dev); + + return ret; +} + +static int tsinfo_reply_size(const struct ethnl_req_info *req_base, + const struct ethnl_reply_data *reply_base) +{ + const struct tsinfo_reply_data *data = TSINFO_REPDATA(reply_base); + bool compact = 
req_base->flags & ETHTOOL_FLAG_COMPACT_BITSETS; + const struct kernel_ethtool_ts_info *ts_info = &data->ts_info; + int len = 0; + int ret; + + BUILD_BUG_ON(__SOF_TIMESTAMPING_CNT > 32); + BUILD_BUG_ON(__HWTSTAMP_TX_CNT > 32); + BUILD_BUG_ON(__HWTSTAMP_FILTER_CNT > 32); + + if (ts_info->so_timestamping) { + ret = ethnl_bitset32_size(&ts_info->so_timestamping, NULL, + __SOF_TIMESTAMPING_CNT, + sof_timestamping_names, compact); + if (ret < 0) + return ret; + len += ret; /* _TSINFO_TIMESTAMPING */ + } + if (ts_info->tx_types) { + ret = ethnl_bitset32_size(&ts_info->tx_types, NULL, + __HWTSTAMP_TX_CNT, + ts_tx_type_names, compact); + if (ret < 0) + return ret; + len += ret; /* _TSINFO_TX_TYPES */ + } + if (ts_info->rx_filters) { + ret = ethnl_bitset32_size(&ts_info->rx_filters, NULL, + __HWTSTAMP_FILTER_CNT, + ts_rx_filter_names, compact); + if (ret < 0) + return ret; + len += ret; /* _TSINFO_RX_FILTERS */ + } + if (ts_info->phc_index >= 0) { + len += nla_total_size(sizeof(u32)); /* _TSINFO_PHC_INDEX */ + /* _TSINFO_HWTSTAMP_PROVIDER */ + len += nla_total_size(0) + 2 * nla_total_size(sizeof(u32)); + } + if (ts_info->phc_source) { + len += nla_total_size(sizeof(u32)); /* _TSINFO_HWTSTAMP_SOURCE */ + if (ts_info->phc_phyindex) + /* _TSINFO_HWTSTAMP_PHYINDEX */ + len += nla_total_size(sizeof(u32)); + } + if (req_base->flags & ETHTOOL_FLAG_STATS) + len += nla_total_size(0) + /* _TSINFO_STATS */ + nla_total_size_64bit(sizeof(u64)) * ETHTOOL_TS_STAT_CNT; + + return len; +} + +static int tsinfo_put_stat(struct sk_buff *skb, u64 val, u16 attrtype) +{ + if (val == ETHTOOL_STAT_NOT_SET) + return 0; + if (nla_put_uint(skb, attrtype, val)) + return -EMSGSIZE; + return 0; +} + +static int tsinfo_put_stats(struct sk_buff *skb, + const struct ethtool_ts_stats *stats) +{ + struct nlattr *nest; + + nest = nla_nest_start(skb, ETHTOOL_A_TSINFO_STATS); + if (!nest) + return -EMSGSIZE; + + if (tsinfo_put_stat(skb, stats->tx_stats.pkts, + ETHTOOL_A_TS_STAT_TX_PKTS) || + tsinfo_put_stat(skb, stats->tx_stats.onestep_pkts_unconfirmed, + ETHTOOL_A_TS_STAT_TX_ONESTEP_PKTS_UNCONFIRMED) || + tsinfo_put_stat(skb, stats->tx_stats.lost, + ETHTOOL_A_TS_STAT_TX_LOST) || + tsinfo_put_stat(skb, stats->tx_stats.err, + ETHTOOL_A_TS_STAT_TX_ERR)) + goto err_cancel; + + nla_nest_end(skb, nest); + return 0; + +err_cancel: + nla_nest_cancel(skb, nest); + return -EMSGSIZE; +} + +static int tsinfo_fill_reply(struct sk_buff *skb, + const struct ethnl_req_info *req_base, + const struct ethnl_reply_data *reply_base) +{ + const struct tsinfo_reply_data *data = TSINFO_REPDATA(reply_base); + bool compact = req_base->flags & ETHTOOL_FLAG_COMPACT_BITSETS; + const struct kernel_ethtool_ts_info *ts_info = &data->ts_info; + int ret; + + if (ts_info->so_timestamping) { + ret = ethnl_put_bitset32(skb, ETHTOOL_A_TSINFO_TIMESTAMPING, + &ts_info->so_timestamping, NULL, + __SOF_TIMESTAMPING_CNT, + sof_timestamping_names, compact); + if (ret < 0) + return ret; + } + if (ts_info->tx_types) { + ret = ethnl_put_bitset32(skb, ETHTOOL_A_TSINFO_TX_TYPES, + &ts_info->tx_types, NULL, + __HWTSTAMP_TX_CNT, + ts_tx_type_names, compact); + if (ret < 0) + return ret; + } + if (ts_info->rx_filters) { + ret = ethnl_put_bitset32(skb, ETHTOOL_A_TSINFO_RX_FILTERS, + &ts_info->rx_filters, NULL, + __HWTSTAMP_FILTER_CNT, + ts_rx_filter_names, compact); + if (ret < 0) + return ret; + } + if (ts_info->phc_index >= 0) { + struct nlattr *nest; + + ret = nla_put_u32(skb, ETHTOOL_A_TSINFO_PHC_INDEX, + ts_info->phc_index); + if (ret) + return -EMSGSIZE; + + nest = 
nla_nest_start(skb, ETHTOOL_A_TSINFO_HWTSTAMP_PROVIDER); + if (!nest) + return -EMSGSIZE; + + if (nla_put_u32(skb, ETHTOOL_A_TS_HWTSTAMP_PROVIDER_INDEX, + ts_info->phc_index) || + nla_put_u32(skb, + ETHTOOL_A_TS_HWTSTAMP_PROVIDER_QUALIFIER, + ts_info->phc_qualifier)) { + nla_nest_cancel(skb, nest); + return -EMSGSIZE; + } + + nla_nest_end(skb, nest); + } + if (ts_info->phc_source) { + if (nla_put_u32(skb, ETHTOOL_A_TSINFO_HWTSTAMP_SOURCE, + ts_info->phc_source)) + return -EMSGSIZE; + + if (ts_info->phc_phyindex && + nla_put_u32(skb, ETHTOOL_A_TSINFO_HWTSTAMP_PHYINDEX, + ts_info->phc_phyindex)) + return -EMSGSIZE; + } + if (req_base->flags & ETHTOOL_FLAG_STATS && + tsinfo_put_stats(skb, &data->stats)) + return -EMSGSIZE; + + return 0; +} + +struct ethnl_tsinfo_dump_ctx { + struct tsinfo_req_info *req_info; + struct tsinfo_reply_data *reply_data; + unsigned long pos_ifindex; + bool netdev_dump_done; + unsigned long pos_phyindex; + enum hwtstamp_provider_qualifier pos_phcqualifier; +}; + +static void *ethnl_tsinfo_prepare_dump(struct sk_buff *skb, + struct net_device *dev, + struct tsinfo_reply_data *reply_data, + struct netlink_callback *cb) +{ + struct ethnl_tsinfo_dump_ctx *ctx = (void *)cb->ctx; + void *ehdr = NULL; + + ehdr = ethnl_dump_put(skb, cb, + ETHTOOL_MSG_TSINFO_GET_REPLY); + if (!ehdr) + return ERR_PTR(-EMSGSIZE); + + reply_data = ctx->reply_data; + memset(reply_data, 0, sizeof(*reply_data)); + reply_data->base.dev = dev; + reply_data->ts_info.cmd = ETHTOOL_GET_TS_INFO; + reply_data->ts_info.phc_index = -1; + + return ehdr; +} + +static int ethnl_tsinfo_end_dump(struct sk_buff *skb, + struct net_device *dev, + struct tsinfo_req_info *req_info, + struct tsinfo_reply_data *reply_data, + void *ehdr) +{ + int ret; + + reply_data->ts_info.so_timestamping |= SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE; + + ret = ethnl_fill_reply_header(skb, dev, ETHTOOL_A_TSINFO_HEADER); + if (ret < 0) + return ret; + + ret = tsinfo_fill_reply(skb, &req_info->base, &reply_data->base); + if (ret < 0) + return ret; + + reply_data->base.dev = NULL; + genlmsg_end(skb, ehdr); + + return ret; +} + +static int ethnl_tsinfo_dump_one_phydev(struct sk_buff *skb, + struct net_device *dev, + struct phy_device *phydev, + struct netlink_callback *cb) +{ + struct ethnl_tsinfo_dump_ctx *ctx = (void *)cb->ctx; + struct tsinfo_reply_data *reply_data; + struct tsinfo_req_info *req_info; + void *ehdr = NULL; + int ret = 0; + + if (!phy_has_tsinfo(phydev)) + return -EOPNOTSUPP; + + reply_data = ctx->reply_data; + req_info = ctx->req_info; + ehdr = ethnl_tsinfo_prepare_dump(skb, dev, reply_data, cb); + if (IS_ERR(ehdr)) + return PTR_ERR(ehdr); + + ret = phy_ts_info(phydev, &reply_data->ts_info); + if (ret < 0) + goto err; + + if (reply_data->ts_info.phc_index >= 0) { + reply_data->ts_info.phc_source = HWTSTAMP_SOURCE_PHYLIB; + reply_data->ts_info.phc_phyindex = phydev->phyindex; + } + + ret = ethnl_tsinfo_end_dump(skb, dev, req_info, reply_data, ehdr); + if (ret < 0) + goto err; + + return ret; +err: + genlmsg_cancel(skb, ehdr); + return ret; +} + +static int ethnl_tsinfo_dump_one_netdev(struct sk_buff *skb, + struct net_device *dev, + struct netlink_callback *cb) +{ + struct ethnl_tsinfo_dump_ctx *ctx = (void *)cb->ctx; + const struct ethtool_ops *ops = dev->ethtool_ops; + struct tsinfo_reply_data *reply_data; + struct tsinfo_req_info *req_info; + void *ehdr = NULL; + int ret = 0; + + if (!ops->get_ts_info) + return -EOPNOTSUPP; + + reply_data = ctx->reply_data; + req_info = ctx->req_info; + for (; 
ctx->pos_phcqualifier < HWTSTAMP_PROVIDER_QUALIFIER_CNT; + ctx->pos_phcqualifier++) { + if (!net_support_hwtstamp_qualifier(dev, + ctx->pos_phcqualifier)) + continue; + + ehdr = ethnl_tsinfo_prepare_dump(skb, dev, reply_data, cb); + if (IS_ERR(ehdr)) { + ret = PTR_ERR(ehdr); + goto err; + } + + reply_data->ts_info.phc_qualifier = ctx->pos_phcqualifier; + ret = ops->get_ts_info(dev, &reply_data->ts_info); + if (ret < 0) + goto err; + + if (reply_data->ts_info.phc_index >= 0) + reply_data->ts_info.phc_source = HWTSTAMP_SOURCE_NETDEV; + ret = ethnl_tsinfo_end_dump(skb, dev, req_info, reply_data, + ehdr); + if (ret < 0) + goto err; + } + + return ret; + +err: + genlmsg_cancel(skb, ehdr); + return ret; +} + +static int ethnl_tsinfo_dump_one_net_topo(struct sk_buff *skb, + struct net_device *dev, + struct netlink_callback *cb) +{ + struct ethnl_tsinfo_dump_ctx *ctx = (void *)cb->ctx; + struct phy_device_node *pdn; + int ret = 0; + + if (!ctx->netdev_dump_done) { + ret = ethnl_tsinfo_dump_one_netdev(skb, dev, cb); + if (ret < 0 && ret != -EOPNOTSUPP) + return ret; + ctx->netdev_dump_done = true; + } + + if (!dev->link_topo) { + if (phy_has_tsinfo(dev->phydev)) { + ret = ethnl_tsinfo_dump_one_phydev(skb, dev, + dev->phydev, cb); + if (ret < 0 && ret != -EOPNOTSUPP) + return ret; + } + + return 0; + } + + xa_for_each_start(&dev->link_topo->phys, ctx->pos_phyindex, pdn, + ctx->pos_phyindex) { + if (phy_has_tsinfo(pdn->phy)) { + ret = ethnl_tsinfo_dump_one_phydev(skb, dev, + pdn->phy, cb); + if (ret < 0 && ret != -EOPNOTSUPP) + return ret; + } + } + + return ret; +} + +int ethnl_tsinfo_dumpit(struct sk_buff *skb, struct netlink_callback *cb) +{ + struct ethnl_tsinfo_dump_ctx *ctx = (void *)cb->ctx; + struct net *net = sock_net(skb->sk); + struct net_device *dev; + int ret = 0; + + rtnl_lock(); + if (ctx->req_info->base.dev) { + dev = ctx->req_info->base.dev; + netdev_lock_ops(dev); + ret = ethnl_tsinfo_dump_one_net_topo(skb, dev, cb); + netdev_unlock_ops(dev); + } else { + for_each_netdev_dump(net, dev, ctx->pos_ifindex) { + netdev_lock_ops(dev); + ret = ethnl_tsinfo_dump_one_net_topo(skb, dev, cb); + netdev_unlock_ops(dev); + if (ret < 0 && ret != -EOPNOTSUPP) + break; + ctx->pos_phyindex = 0; + ctx->netdev_dump_done = false; + ctx->pos_phcqualifier = HWTSTAMP_PROVIDER_QUALIFIER_PRECISE; + } + } + rtnl_unlock(); + + return ret; +} + +int ethnl_tsinfo_start(struct netlink_callback *cb) +{ + const struct genl_dumpit_info *info = genl_dumpit_info(cb); + struct ethnl_tsinfo_dump_ctx *ctx = (void *)cb->ctx; + struct nlattr **tb = info->info.attrs; + struct tsinfo_reply_data *reply_data; + struct tsinfo_req_info *req_info; + int ret; + + BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx)); + + req_info = kzalloc(sizeof(*req_info), GFP_KERNEL); + if (!req_info) + return -ENOMEM; + reply_data = kzalloc(sizeof(*reply_data), GFP_KERNEL); + if (!reply_data) { + ret = -ENOMEM; + goto free_req_info; + } + + ret = ethnl_parse_header_dev_get(&req_info->base, + tb[ETHTOOL_A_TSINFO_HEADER], + sock_net(cb->skb->sk), cb->extack, + false); + if (ret < 0) + goto free_reply_data; + + ctx->req_info = req_info; + ctx->reply_data = reply_data; + ctx->pos_ifindex = 0; + ctx->pos_phyindex = 0; + ctx->netdev_dump_done = false; + ctx->pos_phcqualifier = HWTSTAMP_PROVIDER_QUALIFIER_PRECISE; + + return 0; + +free_reply_data: + kfree(reply_data); +free_req_info: + kfree(req_info); + + return ret; +} + +int ethnl_tsinfo_done(struct netlink_callback *cb) +{ + struct ethnl_tsinfo_dump_ctx *ctx = (void *)cb->ctx; + struct tsinfo_req_info 
*req_info = ctx->req_info; + + ethnl_parse_header_dev_put(&req_info->base); + kfree(ctx->reply_data); + kfree(ctx->req_info); + + return 0; +} + +const struct ethnl_request_ops ethnl_tsinfo_request_ops = { + .request_cmd = ETHTOOL_MSG_TSINFO_GET, + .reply_cmd = ETHTOOL_MSG_TSINFO_GET_REPLY, + .hdr_attr = ETHTOOL_A_TSINFO_HEADER, + .req_info_size = sizeof(struct tsinfo_req_info), + .reply_data_size = sizeof(struct tsinfo_reply_data), + + .parse_request = tsinfo_parse_request, + .prepare_data = tsinfo_prepare_data, + .reply_size = tsinfo_reply_size, + .fill_reply = tsinfo_fill_reply, +}; diff --git a/net/ethtool/tunnels.c b/net/ethtool/tunnels.c new file mode 100644 index 000000000000..b4ce47dd2aa6 --- /dev/null +++ b/net/ethtool/tunnels.c @@ -0,0 +1,281 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include <linux/ethtool_netlink.h> +#include <net/udp_tunnel.h> +#include <net/vxlan.h> + +#include "bitset.h" +#include "common.h" +#include "netlink.h" + +const struct nla_policy ethnl_tunnel_info_get_policy[] = { + [ETHTOOL_A_TUNNEL_INFO_HEADER] = + NLA_POLICY_NESTED(ethnl_header_policy), +}; + +static_assert(ETHTOOL_UDP_TUNNEL_TYPE_VXLAN == ilog2(UDP_TUNNEL_TYPE_VXLAN)); +static_assert(ETHTOOL_UDP_TUNNEL_TYPE_GENEVE == ilog2(UDP_TUNNEL_TYPE_GENEVE)); +static_assert(ETHTOOL_UDP_TUNNEL_TYPE_VXLAN_GPE == + ilog2(UDP_TUNNEL_TYPE_VXLAN_GPE)); + +static ssize_t ethnl_udp_table_reply_size(unsigned int types, bool compact) +{ + ssize_t size; + + size = ethnl_bitset32_size(&types, NULL, __ETHTOOL_UDP_TUNNEL_TYPE_CNT, + udp_tunnel_type_names, compact); + if (size < 0) + return size; + + return size + + nla_total_size(0) + /* _UDP_TABLE */ + nla_total_size(sizeof(u32)); /* _UDP_TABLE_SIZE */ +} + +static ssize_t +ethnl_tunnel_info_reply_size(const struct ethnl_req_info *req_base, + struct netlink_ext_ack *extack) +{ + bool compact = req_base->flags & ETHTOOL_FLAG_COMPACT_BITSETS; + const struct udp_tunnel_nic_info *info; + unsigned int i; + ssize_t ret; + size_t size; + + info = req_base->dev->udp_tunnel_nic_info; + if (!info) { + NL_SET_ERR_MSG(extack, + "device does not report tunnel offload info"); + return -EOPNOTSUPP; + } + + size = nla_total_size(0); /* _INFO_UDP_PORTS */ + + for (i = 0; i < UDP_TUNNEL_NIC_MAX_TABLES; i++) { + if (!info->tables[i].n_entries) + break; + + ret = ethnl_udp_table_reply_size(info->tables[i].tunnel_types, + compact); + if (ret < 0) + return ret; + size += ret; + + size += udp_tunnel_nic_dump_size(req_base->dev, i); + } + + if (info->flags & UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN) { + ret = ethnl_udp_table_reply_size(0, compact); + if (ret < 0) + return ret; + size += ret; + + size += nla_total_size(0) + /* _TABLE_ENTRY */ + nla_total_size(sizeof(__be16)) + /* _ENTRY_PORT */ + nla_total_size(sizeof(u32)); /* _ENTRY_TYPE */ + } + + return size; +} + +static int +ethnl_tunnel_info_fill_reply(const struct ethnl_req_info *req_base, + struct sk_buff *skb) +{ + bool compact = req_base->flags & ETHTOOL_FLAG_COMPACT_BITSETS; + const struct udp_tunnel_nic_info *info; + struct nlattr *ports, *table, *entry; + unsigned int i; + + info = req_base->dev->udp_tunnel_nic_info; + if (!info) + return -EOPNOTSUPP; + + ports = nla_nest_start(skb, ETHTOOL_A_TUNNEL_INFO_UDP_PORTS); + if (!ports) + return -EMSGSIZE; + + for (i = 0; i < UDP_TUNNEL_NIC_MAX_TABLES; i++) { + if (!info->tables[i].n_entries) + break; + + table = nla_nest_start(skb, ETHTOOL_A_TUNNEL_UDP_TABLE); + if (!table) + goto err_cancel_ports; + + if (nla_put_u32(skb, ETHTOOL_A_TUNNEL_UDP_TABLE_SIZE, + 
info->tables[i].n_entries)) + goto err_cancel_table; + + if (ethnl_put_bitset32(skb, ETHTOOL_A_TUNNEL_UDP_TABLE_TYPES, + &info->tables[i].tunnel_types, NULL, + __ETHTOOL_UDP_TUNNEL_TYPE_CNT, + udp_tunnel_type_names, compact)) + goto err_cancel_table; + + if (udp_tunnel_nic_dump_write(req_base->dev, i, skb)) + goto err_cancel_table; + + nla_nest_end(skb, table); + } + + if (info->flags & UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN) { + u32 zero = 0; + + table = nla_nest_start(skb, ETHTOOL_A_TUNNEL_UDP_TABLE); + if (!table) + goto err_cancel_ports; + + if (nla_put_u32(skb, ETHTOOL_A_TUNNEL_UDP_TABLE_SIZE, 1)) + goto err_cancel_table; + + if (ethnl_put_bitset32(skb, ETHTOOL_A_TUNNEL_UDP_TABLE_TYPES, + &zero, NULL, + __ETHTOOL_UDP_TUNNEL_TYPE_CNT, + udp_tunnel_type_names, compact)) + goto err_cancel_table; + + entry = nla_nest_start(skb, ETHTOOL_A_TUNNEL_UDP_TABLE_ENTRY); + if (!entry) + goto err_cancel_entry; + + if (nla_put_be16(skb, ETHTOOL_A_TUNNEL_UDP_ENTRY_PORT, + htons(IANA_VXLAN_UDP_PORT)) || + nla_put_u32(skb, ETHTOOL_A_TUNNEL_UDP_ENTRY_TYPE, + ilog2(UDP_TUNNEL_TYPE_VXLAN))) + goto err_cancel_entry; + + nla_nest_end(skb, entry); + nla_nest_end(skb, table); + } + + nla_nest_end(skb, ports); + + return 0; + +err_cancel_entry: + nla_nest_cancel(skb, entry); +err_cancel_table: + nla_nest_cancel(skb, table); +err_cancel_ports: + nla_nest_cancel(skb, ports); + return -EMSGSIZE; +} + +int ethnl_tunnel_info_doit(struct sk_buff *skb, struct genl_info *info) +{ + struct ethnl_req_info req_info = {}; + struct nlattr **tb = info->attrs; + struct sk_buff *rskb; + void *reply_payload; + int reply_len; + int ret; + + ret = ethnl_parse_header_dev_get(&req_info, + tb[ETHTOOL_A_TUNNEL_INFO_HEADER], + genl_info_net(info), info->extack, + true); + if (ret < 0) + return ret; + + rtnl_lock(); + ret = ethnl_tunnel_info_reply_size(&req_info, info->extack); + if (ret < 0) + goto err_unlock_rtnl; + reply_len = ret + ethnl_reply_header_size(); + + rskb = ethnl_reply_init(reply_len, req_info.dev, + ETHTOOL_MSG_TUNNEL_INFO_GET_REPLY, + ETHTOOL_A_TUNNEL_INFO_HEADER, + info, &reply_payload); + if (!rskb) { + ret = -ENOMEM; + goto err_unlock_rtnl; + } + + ret = ethnl_tunnel_info_fill_reply(&req_info, rskb); + if (ret) + goto err_free_msg; + rtnl_unlock(); + ethnl_parse_header_dev_put(&req_info); + genlmsg_end(rskb, reply_payload); + + return genlmsg_reply(rskb, info); + +err_free_msg: + nlmsg_free(rskb); +err_unlock_rtnl: + rtnl_unlock(); + ethnl_parse_header_dev_put(&req_info); + return ret; +} + +struct ethnl_tunnel_info_dump_ctx { + struct ethnl_req_info req_info; + unsigned long ifindex; +}; + +int ethnl_tunnel_info_start(struct netlink_callback *cb) +{ + const struct genl_dumpit_info *info = genl_dumpit_info(cb); + struct ethnl_tunnel_info_dump_ctx *ctx = (void *)cb->ctx; + struct nlattr **tb = info->info.attrs; + int ret; + + BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx)); + + memset(ctx, 0, sizeof(*ctx)); + + ret = ethnl_parse_header_dev_get(&ctx->req_info, + tb[ETHTOOL_A_TUNNEL_INFO_HEADER], + sock_net(cb->skb->sk), cb->extack, + false); + if (ctx->req_info.dev) { + ethnl_parse_header_dev_put(&ctx->req_info); + ctx->req_info.dev = NULL; + } + + return ret; +} + +int ethnl_tunnel_info_dumpit(struct sk_buff *skb, struct netlink_callback *cb) +{ + struct ethnl_tunnel_info_dump_ctx *ctx = (void *)cb->ctx; + struct net *net = sock_net(skb->sk); + struct net_device *dev; + int ret = 0; + void *ehdr; + + rtnl_lock(); + for_each_netdev_dump(net, dev, ctx->ifindex) { + ehdr = ethnl_dump_put(skb, cb, + 
ETHTOOL_MSG_TUNNEL_INFO_GET_REPLY); + if (!ehdr) { + ret = -EMSGSIZE; + break; + } + + ret = ethnl_fill_reply_header(skb, dev, + ETHTOOL_A_TUNNEL_INFO_HEADER); + if (ret < 0) { + genlmsg_cancel(skb, ehdr); + break; + } + + ctx->req_info.dev = dev; + ret = ethnl_tunnel_info_fill_reply(&ctx->req_info, skb); + ctx->req_info.dev = NULL; + if (ret < 0) { + genlmsg_cancel(skb, ehdr); + if (ret == -EOPNOTSUPP) + continue; + break; + } + genlmsg_end(skb, ehdr); + } + rtnl_unlock(); + + if (ret == -EMSGSIZE && skb->len) + return skb->len; + return ret; +} diff --git a/net/ethtool/wol.c b/net/ethtool/wol.c new file mode 100644 index 000000000000..a39d8000d808 --- /dev/null +++ b/net/ethtool/wol.c @@ -0,0 +1,158 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include "netlink.h" +#include "common.h" +#include "bitset.h" + +struct wol_req_info { + struct ethnl_req_info base; +}; + +struct wol_reply_data { + struct ethnl_reply_data base; + struct ethtool_wolinfo wol; + bool show_sopass; +}; + +#define WOL_REPDATA(__reply_base) \ + container_of(__reply_base, struct wol_reply_data, base) + +const struct nla_policy ethnl_wol_get_policy[] = { + [ETHTOOL_A_WOL_HEADER] = + NLA_POLICY_NESTED(ethnl_header_policy), +}; + +static int wol_prepare_data(const struct ethnl_req_info *req_base, + struct ethnl_reply_data *reply_base, + const struct genl_info *info) +{ + struct wol_reply_data *data = WOL_REPDATA(reply_base); + struct net_device *dev = reply_base->dev; + int ret; + + if (!dev->ethtool_ops->get_wol) + return -EOPNOTSUPP; + + ret = ethnl_ops_begin(dev); + if (ret < 0) + return ret; + dev->ethtool_ops->get_wol(dev, &data->wol); + ethnl_ops_complete(dev); + /* do not include password in notifications */ + data->show_sopass = !genl_info_is_ntf(info) && + (data->wol.supported & WAKE_MAGICSECURE); + + return 0; +} + +static int wol_reply_size(const struct ethnl_req_info *req_base, + const struct ethnl_reply_data *reply_base) +{ + bool compact = req_base->flags & ETHTOOL_FLAG_COMPACT_BITSETS; + const struct wol_reply_data *data = WOL_REPDATA(reply_base); + int len; + + len = ethnl_bitset32_size(&data->wol.wolopts, &data->wol.supported, + WOL_MODE_COUNT, wol_mode_names, compact); + if (len < 0) + return len; + if (data->show_sopass) + len += nla_total_size(sizeof(data->wol.sopass)); + + return len; +} + +static int wol_fill_reply(struct sk_buff *skb, + const struct ethnl_req_info *req_base, + const struct ethnl_reply_data *reply_base) +{ + bool compact = req_base->flags & ETHTOOL_FLAG_COMPACT_BITSETS; + const struct wol_reply_data *data = WOL_REPDATA(reply_base); + int ret; + + ret = ethnl_put_bitset32(skb, ETHTOOL_A_WOL_MODES, &data->wol.wolopts, + &data->wol.supported, WOL_MODE_COUNT, + wol_mode_names, compact); + if (ret < 0) + return ret; + if (data->show_sopass && + nla_put(skb, ETHTOOL_A_WOL_SOPASS, sizeof(data->wol.sopass), + data->wol.sopass)) + return -EMSGSIZE; + + return 0; +} + +/* WOL_SET */ + +const struct nla_policy ethnl_wol_set_policy[] = { + [ETHTOOL_A_WOL_HEADER] = + NLA_POLICY_NESTED(ethnl_header_policy), + [ETHTOOL_A_WOL_MODES] = { .type = NLA_NESTED }, + [ETHTOOL_A_WOL_SOPASS] = { .type = NLA_BINARY, + .len = SOPASS_MAX }, +}; + +static int +ethnl_set_wol_validate(struct ethnl_req_info *req_info, struct genl_info *info) +{ + const struct ethtool_ops *ops = req_info->dev->ethtool_ops; + + return ops->get_wol && ops->set_wol ? 
1 : -EOPNOTSUPP; +} + +static int +ethnl_set_wol(struct ethnl_req_info *req_info, struct genl_info *info) +{ + struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL }; + struct net_device *dev = req_info->dev; + struct nlattr **tb = info->attrs; + bool mod = false; + int ret; + + dev->ethtool_ops->get_wol(dev, &wol); + ret = ethnl_update_bitset32(&wol.wolopts, WOL_MODE_COUNT, + tb[ETHTOOL_A_WOL_MODES], wol_mode_names, + info->extack, &mod); + if (ret < 0) + return ret; + if (wol.wolopts & ~wol.supported) { + NL_SET_ERR_MSG_ATTR(info->extack, tb[ETHTOOL_A_WOL_MODES], + "cannot enable unsupported WoL mode"); + return -EINVAL; + } + if (tb[ETHTOOL_A_WOL_SOPASS]) { + if (!(wol.supported & WAKE_MAGICSECURE)) { + NL_SET_ERR_MSG_ATTR(info->extack, + tb[ETHTOOL_A_WOL_SOPASS], + "magicsecure not supported, cannot set password"); + return -EINVAL; + } + ethnl_update_binary(wol.sopass, sizeof(wol.sopass), + tb[ETHTOOL_A_WOL_SOPASS], &mod); + } + + if (!mod) + return 0; + ret = dev->ethtool_ops->set_wol(dev, &wol); + if (ret) + return ret; + dev->ethtool->wol_enabled = !!wol.wolopts; + return 1; +} + +const struct ethnl_request_ops ethnl_wol_request_ops = { + .request_cmd = ETHTOOL_MSG_WOL_GET, + .reply_cmd = ETHTOOL_MSG_WOL_GET_REPLY, + .hdr_attr = ETHTOOL_A_WOL_HEADER, + .req_info_size = sizeof(struct wol_req_info), + .reply_data_size = sizeof(struct wol_reply_data), + + .prepare_data = wol_prepare_data, + .reply_size = wol_reply_size, + .fill_reply = wol_fill_reply, + + .set_validate = ethnl_set_wol_validate, + .set = ethnl_set_wol, + .set_ntf_cmd = ETHTOOL_MSG_WOL_NTF, +}; |
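
The WOL_GET/WOL_SET handlers above only translate between netlink attributes and struct ethtool_wolinfo; capability reporting and the actual hardware programming live in each driver's get_wol/set_wol ethtool_ops callbacks. Below is a minimal, hypothetical driver-side sketch of that contract — only the ethtool_ops callback signatures and the WAKE_*/SOPASS_MAX definitions come from <linux/ethtool.h>; everything prefixed with mynic_ is invented for illustration.

/* Hypothetical driver-side WoL support; the ethtool_ops signatures and the
 * WAKE_* / SOPASS_MAX definitions are real, the mynic_* names are not.
 */
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/string.h>

struct mynic_priv {
	u32 wolopts;		/* currently enabled WAKE_* modes */
	u8 sopass[SOPASS_MAX];	/* SecureOn password for WAKE_MAGICSECURE */
};

static void mynic_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct mynic_priv *priv = netdev_priv(dev);

	/* Report what the hardware supports and what is enabled; wol.c turns
	 * this into the ETHTOOL_A_WOL_MODES bitset and, for devices reporting
	 * WAKE_MAGICSECURE, the ETHTOOL_A_WOL_SOPASS attribute.
	 */
	wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE;
	wol->wolopts = priv->wolopts;
	memcpy(wol->sopass, priv->sopass, sizeof(wol->sopass));
}

static int mynic_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct mynic_priv *priv = netdev_priv(dev);

	/* ethnl_set_wol() has already rejected modes outside ->supported and
	 * a SecureOn password on devices that do not report WAKE_MAGICSECURE,
	 * so only sane requests arrive here.
	 */
	priv->wolopts = wol->wolopts;
	memcpy(priv->sopass, wol->sopass, sizeof(priv->sopass));

	/* Programming of the NIC's wake-up block would go here. */
	return 0;
}

static const struct ethtool_ops mynic_ethtool_ops = {
	.get_wol = mynic_get_wol,
	.set_wol = mynic_set_wol,
};

Note that ethnl_set_wol() returns 1 only when something was actually modified; together with .set_ntf_cmd = ETHTOOL_MSG_WOL_NTF in ethnl_wol_request_ops, that return value is what causes an ETHTOOL_MSG_WOL_NTF notification to be sent after a successful change.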
