author    | Dmitry Torokhov <dmitry.torokhov@gmail.com> | 2019-07-10 23:24:10 -0700
committer | Dmitry Torokhov <dmitry.torokhov@gmail.com> | 2019-07-10 23:24:10 -0700
commit    | 597473720f4dc69749542bfcfed4a927a43d935e (patch)
tree      | 711bf773910fb93d1dd9120c633adc807685e0d8 /net/switchdev/switchdev.c
parent    | f0dd687815f9546860fc3ac4379d55da045942c9 (diff)
parent    | 593fdd4fb44ef2cbf4ec53ec2c6eb60eb079bb4c (diff)
Merge branch 'next' into for-linus
Prepare input updates for 5.3 merge window.
Diffstat (limited to 'net/switchdev/switchdev.c')
-rw-r--r-- | net/switchdev/switchdev.c | 470
1 file changed, 240 insertions(+), 230 deletions(-)
diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c
index 74b9d916a58b..90ba4a1f0a6d 100644
--- a/net/switchdev/switchdev.c
+++ b/net/switchdev/switchdev.c
@@ -23,78 +23,6 @@
 #include <linux/rtnetlink.h>
 #include <net/switchdev.h>
 
-/**
- * switchdev_trans_item_enqueue - Enqueue data item to transaction queue
- *
- * @trans: transaction
- * @data: pointer to data being queued
- * @destructor: data destructor
- * @tritem: transaction item being queued
- *
- * Enqeueue data item to transaction queue. tritem is typically placed in
- * cointainter pointed at by data pointer. Destructor is called on
- * transaction abort and after successful commit phase in case
- * the caller did not dequeue the item before.
- */
-void switchdev_trans_item_enqueue(struct switchdev_trans *trans,
-				  void *data, void (*destructor)(void const *),
-				  struct switchdev_trans_item *tritem)
-{
-	tritem->data = data;
-	tritem->destructor = destructor;
-	list_add_tail(&tritem->list, &trans->item_list);
-}
-EXPORT_SYMBOL_GPL(switchdev_trans_item_enqueue);
-
-static struct switchdev_trans_item *
-__switchdev_trans_item_dequeue(struct switchdev_trans *trans)
-{
-	struct switchdev_trans_item *tritem;
-
-	if (list_empty(&trans->item_list))
-		return NULL;
-	tritem = list_first_entry(&trans->item_list,
-				  struct switchdev_trans_item, list);
-	list_del(&tritem->list);
-	return tritem;
-}
-
-/**
- * switchdev_trans_item_dequeue - Dequeue data item from transaction queue
- *
- * @trans: transaction
- */
-void *switchdev_trans_item_dequeue(struct switchdev_trans *trans)
-{
-	struct switchdev_trans_item *tritem;
-
-	tritem = __switchdev_trans_item_dequeue(trans);
-	BUG_ON(!tritem);
-	return tritem->data;
-}
-EXPORT_SYMBOL_GPL(switchdev_trans_item_dequeue);
-
-static void switchdev_trans_init(struct switchdev_trans *trans)
-{
-	INIT_LIST_HEAD(&trans->item_list);
-}
-
-static void switchdev_trans_items_destroy(struct switchdev_trans *trans)
-{
-	struct switchdev_trans_item *tritem;
-
-	while ((tritem = __switchdev_trans_item_dequeue(trans)))
-		tritem->destructor(tritem->data);
-}
-
-static void switchdev_trans_items_warn_destroy(struct net_device *dev,
-					       struct switchdev_trans *trans)
-{
-	WARN(!list_empty(&trans->item_list), "%s: transaction item queue is not empty.\n",
-	     dev->name);
-	switchdev_trans_items_destroy(trans);
-}
-
 static LIST_HEAD(deferred);
 static DEFINE_SPINLOCK(deferred_lock);
 
@@ -174,81 +102,32 @@ static int switchdev_deferred_enqueue(struct net_device *dev,
 	return 0;
 }
 
-/**
- * switchdev_port_attr_get - Get port attribute
- *
- * @dev: port device
- * @attr: attribute to get
- */
-int switchdev_port_attr_get(struct net_device *dev, struct switchdev_attr *attr)
+static int switchdev_port_attr_notify(enum switchdev_notifier_type nt,
+				      struct net_device *dev,
+				      const struct switchdev_attr *attr,
+				      struct switchdev_trans *trans)
 {
-	const struct switchdev_ops *ops = dev->switchdev_ops;
-	struct net_device *lower_dev;
-	struct list_head *iter;
-	struct switchdev_attr first = {
-		.id = SWITCHDEV_ATTR_ID_UNDEFINED
-	};
-	int err = -EOPNOTSUPP;
+	int err;
+	int rc;
 
-	if (ops && ops->switchdev_port_attr_get)
-		return ops->switchdev_port_attr_get(dev, attr);
+	struct switchdev_notifier_port_attr_info attr_info = {
+		.attr = attr,
+		.trans = trans,
+		.handled = false,
+	};
 
-	if (attr->flags & SWITCHDEV_F_NO_RECURSE)
+	rc = call_switchdev_blocking_notifiers(nt, dev,
+					       &attr_info.info, NULL);
+	err = notifier_to_errno(rc);
+	if (err) {
+		WARN_ON(!attr_info.handled);
 		return err;
-
-	/* Switch device port(s) may be stacked under
-	 * bond/team/vlan dev, so recurse down to get attr on
-	 * each port.  Return -ENODATA if attr values don't
-	 * compare across ports.
-	 */
-
-	netdev_for_each_lower_dev(dev, lower_dev, iter) {
-		err = switchdev_port_attr_get(lower_dev, attr);
-		if (err)
-			break;
-		if (first.id == SWITCHDEV_ATTR_ID_UNDEFINED)
-			first = *attr;
-		else if (memcmp(&first, attr, sizeof(*attr)))
-			return -ENODATA;
 	}
 
-	return err;
-}
-EXPORT_SYMBOL_GPL(switchdev_port_attr_get);
+	if (!attr_info.handled)
+		return -EOPNOTSUPP;
 
-static int __switchdev_port_attr_set(struct net_device *dev,
-				     const struct switchdev_attr *attr,
-				     struct switchdev_trans *trans)
-{
-	const struct switchdev_ops *ops = dev->switchdev_ops;
-	struct net_device *lower_dev;
-	struct list_head *iter;
-	int err = -EOPNOTSUPP;
-
-	if (ops && ops->switchdev_port_attr_set) {
-		err = ops->switchdev_port_attr_set(dev, attr, trans);
-		goto done;
-	}
-
-	if (attr->flags & SWITCHDEV_F_NO_RECURSE)
-		goto done;
-
-	/* Switch device port(s) may be stacked under
-	 * bond/team/vlan dev, so recurse down to set attr on
-	 * each port.
-	 */
-
-	netdev_for_each_lower_dev(dev, lower_dev, iter) {
-		err = __switchdev_port_attr_set(lower_dev, attr, trans);
-		if (err)
-			break;
-	}
-
-done:
-	if (err == -EOPNOTSUPP && attr->flags & SWITCHDEV_F_SKIP_EOPNOTSUPP)
-		err = 0;
-
-	return err;
+	return 0;
 }
 
 static int switchdev_port_attr_set_now(struct net_device *dev,
@@ -257,8 +136,6 @@ static int switchdev_port_attr_set_now(struct net_device *dev,
 	struct switchdev_trans trans;
 	int err;
 
-	switchdev_trans_init(&trans);
-
 	/* Phase I: prepare for attr set. Driver/device should fail
 	 * here if there are going to be issues in the commit phase,
 	 * such as lack of resources or support.  The driver/device
@@ -267,18 +144,10 @@ static int switchdev_port_attr_set_now(struct net_device *dev,
 	 */
 
 	trans.ph_prepare = true;
-	err = __switchdev_port_attr_set(dev, attr, &trans);
-	if (err) {
-		/* Prepare phase failed: abort the transaction.  Any
-		 * resources reserved in the prepare phase are
-		 * released.
-		 */
-
-		if (err != -EOPNOTSUPP)
-			switchdev_trans_items_destroy(&trans);
-
+	err = switchdev_port_attr_notify(SWITCHDEV_PORT_ATTR_SET, dev, attr,
+					 &trans);
+	if (err)
 		return err;
-	}
 
 	/* Phase II: commit attr set. This cannot fail as a fault
 	 * of driver/device.  If it does, it's a bug in the driver/device
@@ -286,10 +155,10 @@ static int switchdev_port_attr_set_now(struct net_device *dev,
 	 */
 
 	trans.ph_prepare = false;
-	err = __switchdev_port_attr_set(dev, attr, &trans);
+	err = switchdev_port_attr_notify(SWITCHDEV_PORT_ATTR_SET, dev, attr,
+					 &trans);
 	WARN(err, "%s: Commit of attribute (id=%d) failed.\n",
 	     dev->name, attr->id);
-	switchdev_trans_items_warn_destroy(dev, &trans);
 
 	return err;
 }
@@ -353,42 +222,41 @@ static size_t switchdev_obj_size(const struct switchdev_obj *obj)
 	return 0;
 }
 
-static int __switchdev_port_obj_add(struct net_device *dev,
-				    const struct switchdev_obj *obj,
-				    struct switchdev_trans *trans)
+static int switchdev_port_obj_notify(enum switchdev_notifier_type nt,
+				     struct net_device *dev,
+				     const struct switchdev_obj *obj,
+				     struct switchdev_trans *trans,
+				     struct netlink_ext_ack *extack)
 {
-	const struct switchdev_ops *ops = dev->switchdev_ops;
-	struct net_device *lower_dev;
-	struct list_head *iter;
-	int err = -EOPNOTSUPP;
-
-	if (ops && ops->switchdev_port_obj_add)
-		return ops->switchdev_port_obj_add(dev, obj, trans);
+	int rc;
+	int err;
 
-	/* Switch device port(s) may be stacked under
-	 * bond/team/vlan dev, so recurse down to add object on
-	 * each port.
-	 */
+	struct switchdev_notifier_port_obj_info obj_info = {
+		.obj = obj,
+		.trans = trans,
+		.handled = false,
+	};
 
-	netdev_for_each_lower_dev(dev, lower_dev, iter) {
-		err = __switchdev_port_obj_add(lower_dev, obj, trans);
-		if (err)
-			break;
+	rc = call_switchdev_blocking_notifiers(nt, dev, &obj_info.info, extack);
+	err = notifier_to_errno(rc);
+	if (err) {
+		WARN_ON(!obj_info.handled);
+		return err;
 	}
-
-	return err;
+	if (!obj_info.handled)
+		return -EOPNOTSUPP;
+	return 0;
 }
 
 static int switchdev_port_obj_add_now(struct net_device *dev,
-				      const struct switchdev_obj *obj)
+				      const struct switchdev_obj *obj,
+				      struct netlink_ext_ack *extack)
 {
 	struct switchdev_trans trans;
 	int err;
 
 	ASSERT_RTNL();
 
-	switchdev_trans_init(&trans);
-
 	/* Phase I: prepare for obj add. Driver/device should fail
 	 * here if there are going to be issues in the commit phase,
 	 * such as lack of resources or support.  The driver/device
@@ -397,18 +265,10 @@ static int switchdev_port_obj_add_now(struct net_device *dev,
 	 */
 
 	trans.ph_prepare = true;
-	err = __switchdev_port_obj_add(dev, obj, &trans);
-	if (err) {
-		/* Prepare phase failed: abort the transaction.  Any
-		 * resources reserved in the prepare phase are
-		 * released.
-		 */
-
-		if (err != -EOPNOTSUPP)
-			switchdev_trans_items_destroy(&trans);
-
+	err = switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_ADD,
+					dev, obj, &trans, extack);
+	if (err)
 		return err;
-	}
 
 	/* Phase II: commit obj add. This cannot fail as a fault
 	 * of driver/device.  If it does, it's a bug in the driver/device
@@ -416,9 +276,9 @@ static int switchdev_port_obj_add_now(struct net_device *dev,
 	 */
 	trans.ph_prepare = false;
-	err = __switchdev_port_obj_add(dev, obj, &trans);
+	err = switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_ADD,
+					dev, obj, &trans, extack);
 	WARN(err, "%s: Commit of object (id=%d) failed.\n",
 	     dev->name, obj->id);
-	switchdev_trans_items_warn_destroy(dev, &trans);
 
 	return err;
 }
@@ -429,7 +289,7 @@ static void switchdev_port_obj_add_deferred(struct net_device *dev,
 	const struct switchdev_obj *obj = data;
 	int err;
 
-	err = switchdev_port_obj_add_now(dev, obj);
+	err = switchdev_port_obj_add_now(dev, obj, NULL);
 	if (err && err != -EOPNOTSUPP)
 		netdev_err(dev, "failed (err=%d) to add object (id=%d)\n",
 			   err, obj->id);
@@ -459,38 +319,21 @@ static int switchdev_port_obj_add_defer(struct net_device *dev,
 * in case SWITCHDEV_F_DEFER flag is not set.
 */
 int switchdev_port_obj_add(struct net_device *dev,
-			   const struct switchdev_obj *obj)
+			   const struct switchdev_obj *obj,
+			   struct netlink_ext_ack *extack)
 {
 	if (obj->flags & SWITCHDEV_F_DEFER)
 		return switchdev_port_obj_add_defer(dev, obj);
 	ASSERT_RTNL();
-	return switchdev_port_obj_add_now(dev, obj);
+	return switchdev_port_obj_add_now(dev, obj, extack);
 }
 EXPORT_SYMBOL_GPL(switchdev_port_obj_add);
 
 static int switchdev_port_obj_del_now(struct net_device *dev,
 				      const struct switchdev_obj *obj)
 {
-	const struct switchdev_ops *ops = dev->switchdev_ops;
-	struct net_device *lower_dev;
-	struct list_head *iter;
-	int err = -EOPNOTSUPP;
-
-	if (ops && ops->switchdev_port_obj_del)
-		return ops->switchdev_port_obj_del(dev, obj);
-
-	/* Switch device port(s) may be stacked under
-	 * bond/team/vlan dev, so recurse down to delete object on
-	 * each port.
-	 */
-
-	netdev_for_each_lower_dev(dev, lower_dev, iter) {
-		err = switchdev_port_obj_del_now(lower_dev, obj);
-		if (err)
-			break;
-	}
-
-	return err;
+	return switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_DEL,
+					 dev, obj, NULL, NULL);
 }
 
 static void switchdev_port_obj_del_deferred(struct net_device *dev,
@@ -535,6 +378,7 @@ int switchdev_port_obj_del(struct net_device *dev,
 EXPORT_SYMBOL_GPL(switchdev_port_obj_del);
 
 static ATOMIC_NOTIFIER_HEAD(switchdev_notif_chain);
+static BLOCKING_NOTIFIER_HEAD(switchdev_blocking_notif_chain);
 
 /**
  * register_switchdev_notifier - Register notifier
@@ -569,29 +413,195 @@ EXPORT_SYMBOL_GPL(unregister_switchdev_notifier);
 *	Call all network notifier blocks.
 */
 int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
-			     struct switchdev_notifier_info *info)
+			     struct switchdev_notifier_info *info,
+			     struct netlink_ext_ack *extack)
 {
 	info->dev = dev;
+	info->extack = extack;
 	return atomic_notifier_call_chain(&switchdev_notif_chain, val, info);
 }
 EXPORT_SYMBOL_GPL(call_switchdev_notifiers);
 
-bool switchdev_port_same_parent_id(struct net_device *a,
-				   struct net_device *b)
+int register_switchdev_blocking_notifier(struct notifier_block *nb)
 {
-	struct switchdev_attr a_attr = {
-		.orig_dev = a,
-		.id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
-	};
-	struct switchdev_attr b_attr = {
-		.orig_dev = b,
-		.id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
-	};
+	struct blocking_notifier_head *chain = &switchdev_blocking_notif_chain;
+
+	return blocking_notifier_chain_register(chain, nb);
+}
+EXPORT_SYMBOL_GPL(register_switchdev_blocking_notifier);
+
+int unregister_switchdev_blocking_notifier(struct notifier_block *nb)
+{
+	struct blocking_notifier_head *chain = &switchdev_blocking_notif_chain;
+
+	return blocking_notifier_chain_unregister(chain, nb);
+}
+EXPORT_SYMBOL_GPL(unregister_switchdev_blocking_notifier);
+
+int call_switchdev_blocking_notifiers(unsigned long val, struct net_device *dev,
+				      struct switchdev_notifier_info *info,
+				      struct netlink_ext_ack *extack)
+{
+	info->dev = dev;
+	info->extack = extack;
+	return blocking_notifier_call_chain(&switchdev_blocking_notif_chain,
+					    val, info);
+}
+EXPORT_SYMBOL_GPL(call_switchdev_blocking_notifiers);
+
+static int __switchdev_handle_port_obj_add(struct net_device *dev,
+			struct switchdev_notifier_port_obj_info *port_obj_info,
+			bool (*check_cb)(const struct net_device *dev),
+			int (*add_cb)(struct net_device *dev,
+				      const struct switchdev_obj *obj,
+				      struct switchdev_trans *trans,
+				      struct netlink_ext_ack *extack))
+{
+	struct netlink_ext_ack *extack;
+	struct net_device *lower_dev;
+	struct list_head *iter;
+	int err = -EOPNOTSUPP;
+
+	extack = switchdev_notifier_info_to_extack(&port_obj_info->info);
+
+	if (check_cb(dev)) {
+		/* This flag is only checked if the return value is success. */
+		port_obj_info->handled = true;
+		return add_cb(dev, port_obj_info->obj, port_obj_info->trans,
+			      extack);
+	}
+
+	/* Switch ports might be stacked under e.g. a LAG. Ignore the
+	 * unsupported devices, another driver might be able to handle them. But
+	 * propagate to the callers any hard errors.
+	 *
+	 * If the driver does its own bookkeeping of stacked ports, it's not
+	 * necessary to go through this helper.
+	 */
+	netdev_for_each_lower_dev(dev, lower_dev, iter) {
+		err = __switchdev_handle_port_obj_add(lower_dev, port_obj_info,
+						      check_cb, add_cb);
+		if (err && err != -EOPNOTSUPP)
+			return err;
+	}
+
+	return err;
+}
+
+int switchdev_handle_port_obj_add(struct net_device *dev,
+			struct switchdev_notifier_port_obj_info *port_obj_info,
+			bool (*check_cb)(const struct net_device *dev),
+			int (*add_cb)(struct net_device *dev,
+				      const struct switchdev_obj *obj,
+				      struct switchdev_trans *trans,
+				      struct netlink_ext_ack *extack))
+{
+	int err;
 
-	if (switchdev_port_attr_get(a, &a_attr) ||
-	    switchdev_port_attr_get(b, &b_attr))
-		return false;
+	err = __switchdev_handle_port_obj_add(dev, port_obj_info, check_cb,
+					      add_cb);
+	if (err == -EOPNOTSUPP)
+		err = 0;
+	return err;
+}
+EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_add);
+
+static int __switchdev_handle_port_obj_del(struct net_device *dev,
+			struct switchdev_notifier_port_obj_info *port_obj_info,
+			bool (*check_cb)(const struct net_device *dev),
+			int (*del_cb)(struct net_device *dev,
+				      const struct switchdev_obj *obj))
+{
+	struct net_device *lower_dev;
+	struct list_head *iter;
+	int err = -EOPNOTSUPP;
+
+	if (check_cb(dev)) {
+		/* This flag is only checked if the return value is success. */
+		port_obj_info->handled = true;
+		return del_cb(dev, port_obj_info->obj);
+	}
+
+	/* Switch ports might be stacked under e.g. a LAG. Ignore the
+	 * unsupported devices, another driver might be able to handle them. But
+	 * propagate to the callers any hard errors.
+	 *
+	 * If the driver does its own bookkeeping of stacked ports, it's not
+	 * necessary to go through this helper.
+	 */
+	netdev_for_each_lower_dev(dev, lower_dev, iter) {
+		err = __switchdev_handle_port_obj_del(lower_dev, port_obj_info,
+						      check_cb, del_cb);
+		if (err && err != -EOPNOTSUPP)
+			return err;
+	}
+
+	return err;
+}
+
+int switchdev_handle_port_obj_del(struct net_device *dev,
+			struct switchdev_notifier_port_obj_info *port_obj_info,
+			bool (*check_cb)(const struct net_device *dev),
+			int (*del_cb)(struct net_device *dev,
+				      const struct switchdev_obj *obj))
+{
+	int err;
+
+	err = __switchdev_handle_port_obj_del(dev, port_obj_info, check_cb,
+					      del_cb);
+	if (err == -EOPNOTSUPP)
+		err = 0;
+	return err;
+}
+EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_del);
+
+static int __switchdev_handle_port_attr_set(struct net_device *dev,
+			struct switchdev_notifier_port_attr_info *port_attr_info,
+			bool (*check_cb)(const struct net_device *dev),
+			int (*set_cb)(struct net_device *dev,
+				      const struct switchdev_attr *attr,
+				      struct switchdev_trans *trans))
+{
+	struct net_device *lower_dev;
+	struct list_head *iter;
+	int err = -EOPNOTSUPP;
+
+	if (check_cb(dev)) {
+		port_attr_info->handled = true;
+		return set_cb(dev, port_attr_info->attr,
+			      port_attr_info->trans);
+	}
 
-	return netdev_phys_item_id_same(&a_attr.u.ppid, &b_attr.u.ppid);
+	/* Switch ports might be stacked under e.g. a LAG. Ignore the
+	 * unsupported devices, another driver might be able to handle them. But
+	 * propagate to the callers any hard errors.
+	 *
+	 * If the driver does its own bookkeeping of stacked ports, it's not
+	 * necessary to go through this helper.
+	 */
+	netdev_for_each_lower_dev(dev, lower_dev, iter) {
+		err = __switchdev_handle_port_attr_set(lower_dev, port_attr_info,
						       check_cb, set_cb);
+		if (err && err != -EOPNOTSUPP)
+			return err;
+	}
+
+	return err;
+}
+
+int switchdev_handle_port_attr_set(struct net_device *dev,
+			struct switchdev_notifier_port_attr_info *port_attr_info,
+			bool (*check_cb)(const struct net_device *dev),
+			int (*set_cb)(struct net_device *dev,
+				      const struct switchdev_attr *attr,
+				      struct switchdev_trans *trans))
+{
+	int err;
+
+	err = __switchdev_handle_port_attr_set(dev, port_attr_info, check_cb,
+					       set_cb);
+	if (err == -EOPNOTSUPP)
+		err = 0;
+	return err;
 }
-EXPORT_SYMBOL_GPL(switchdev_port_same_parent_id);
+EXPORT_SYMBOL_GPL(switchdev_handle_port_attr_set);
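For context, here is a minimal sketch of how a driver would consume the blocking notifier chain and the switchdev_handle_port_obj_add() helper introduced in the diff above. The foo_* identifiers are hypothetical placeholders for illustration only and are not part of this patch; upstream drivers such as DSA and mlxsw follow this pattern.

```c
/* Hypothetical driver glue, assuming a driver-owned foo_netdev_ops. */
static bool foo_port_dev_check(const struct net_device *dev)
{
	/* Claim only netdevs that belong to this driver. */
	return dev->netdev_ops == &foo_netdev_ops;
}

static int foo_port_obj_add(struct net_device *dev,
			    const struct switchdev_obj *obj,
			    struct switchdev_trans *trans,
			    struct netlink_ext_ack *extack)
{
	/* Stub: program the object (VLAN, MDB, ...) into hardware,
	 * honouring the prepare/commit phase in trans->ph_prepare.
	 */
	return -EOPNOTSUPP;
}

static int foo_switchdev_blocking_event(struct notifier_block *nb,
					unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	int err;

	switch (event) {
	case SWITCHDEV_PORT_OBJ_ADD:
		/* The helper recurses through lower devices (e.g. LAG
		 * members), invokes foo_port_obj_add() on ports accepted by
		 * foo_port_dev_check(), and sets ->handled so the core can
		 * tell whether anyone processed the event.
		 */
		err = switchdev_handle_port_obj_add(dev, ptr,
						    foo_port_dev_check,
						    foo_port_obj_add);
		return notifier_from_errno(err);
	default:
		return NOTIFY_DONE;
	}
}

static struct notifier_block foo_switchdev_blocking_nb = {
	.notifier_call = foo_switchdev_blocking_event,
};

/* At probe/init time:
 *	register_switchdev_blocking_notifier(&foo_switchdev_blocking_nb);
 */
```

Compared with the removed switchdev_ops path, the recursion over stacked devices now lives in these helpers, so a driver only has to recognise its own ports and handle the objects it supports.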