Diffstat (limited to 'net')
192 files changed, 3238 insertions, 1269 deletions
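Much of the mechanical churn in the diff below is a single rename of the timer-callback helper: every from_timer(var, t, field) call becomes timer_container_of(var, t, field) with identical arguments, recovering the structure that embeds the timer from the timer pointer handed to the callback. The following is a minimal, self-contained userspace sketch of that pattern, not the kernel implementation: the struct timer_list stand-in, the garp_applicant example and the offsetof-based macro are illustrative assumptions (the in-kernel helper is built on container_of()).

#include <stdio.h>
#include <stddef.h>

/* Simplified stand-in for the kernel's struct timer_list. */
struct timer_list {
	void (*function)(struct timer_list *t);
};

/*
 * Illustrative timer_container_of(): recover the enclosing structure
 * from the embedded timer pointer passed to the callback. The real
 * kernel macro is a container_of()-based helper; this sketch uses
 * offsetof() directly (GNU C typeof, as in kernel code).
 */
#define timer_container_of(var, timer_ptr, field) \
	((typeof(var))((char *)(timer_ptr) - offsetof(typeof(*(var)), field)))

/* Hypothetical applicant object embedding a join timer, as in net/802/garp.c. */
struct garp_applicant {
	int state;
	struct timer_list join_timer;
};

static void garp_join_timer(struct timer_list *t)
{
	/* New spelling; from_timer(app, t, join_timer) resolved the same way. */
	struct garp_applicant *app = timer_container_of(app, t, join_timer);

	printf("timer fired for applicant with state %d\n", app->state);
}

int main(void)
{
	struct garp_applicant app = { .state = 42 };

	/* Simulate the timer subsystem invoking the callback. */
	app.join_timer.function = garp_join_timer;
	app.join_timer.function(&app.join_timer);
	return 0;
}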
diff --git a/net/802/garp.c b/net/802/garp.c index 27f0ab146026..2d1ffc4d9462 100644 --- a/net/802/garp.c +++ b/net/802/garp.c @@ -414,7 +414,7 @@ static void garp_join_timer_arm(struct garp_applicant *app) static void garp_join_timer(struct timer_list *t) { - struct garp_applicant *app = from_timer(app, t, join_timer); + struct garp_applicant *app = timer_container_of(app, t, join_timer); spin_lock(&app->lock); garp_gid_event(app, GARP_EVENT_TRANSMIT_PDU); diff --git a/net/802/mrp.c b/net/802/mrp.c index e0c96d0da8d5..23a88305f900 100644 --- a/net/802/mrp.c +++ b/net/802/mrp.c @@ -599,7 +599,7 @@ static void mrp_join_timer_arm(struct mrp_applicant *app) static void mrp_join_timer(struct timer_list *t) { - struct mrp_applicant *app = from_timer(app, t, join_timer); + struct mrp_applicant *app = timer_container_of(app, t, join_timer); spin_lock(&app->lock); mrp_mad_event(app, MRP_EVENT_TX); @@ -621,7 +621,7 @@ static void mrp_periodic_timer_arm(struct mrp_applicant *app) static void mrp_periodic_timer(struct timer_list *t) { - struct mrp_applicant *app = from_timer(app, t, periodic_timer); + struct mrp_applicant *app = timer_container_of(app, t, periodic_timer); spin_lock(&app->lock); if (likely(app->active)) { diff --git a/net/9p/client.c b/net/9p/client.c index 61461b9fa134..5c1ca57ccd28 100644 --- a/net/9p/client.c +++ b/net/9p/client.c @@ -1704,7 +1704,7 @@ p9_client_write_subreq(struct netfs_io_subrequest *subreq) start, len, &subreq->io_iter); } if (IS_ERR(req)) { - netfs_write_subrequest_terminated(subreq, PTR_ERR(req), false); + netfs_write_subrequest_terminated(subreq, PTR_ERR(req)); return; } @@ -1712,7 +1712,7 @@ p9_client_write_subreq(struct netfs_io_subrequest *subreq) if (err) { trace_9p_protocol_dump(clnt, &req->rc); p9_req_put(clnt, req); - netfs_write_subrequest_terminated(subreq, err, false); + netfs_write_subrequest_terminated(subreq, err); return; } @@ -1724,7 +1724,7 @@ p9_client_write_subreq(struct netfs_io_subrequest *subreq) p9_debug(P9_DEBUG_9P, "<<< RWRITE count %d\n", len); p9_req_put(clnt, req); - netfs_write_subrequest_terminated(subreq, written, false); + netfs_write_subrequest_terminated(subreq, written); } EXPORT_SYMBOL(p9_client_write_subreq); diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c index b068651984fe..73ea7e67f05a 100644 --- a/net/appletalk/ddp.c +++ b/net/appletalk/ddp.c @@ -169,7 +169,7 @@ found: static void atalk_destroy_timer(struct timer_list *t) { - struct sock *sk = from_timer(sk, t, sk_timer); + struct sock *sk = timer_container_of(sk, t, sk_timer); if (sk_has_allocations(sk)) { sk->sk_timer.expires = jiffies + SOCK_DESTROY_TIME; diff --git a/net/atm/common.c b/net/atm/common.c index 9b75699992ff..d7f7976ea13a 100644 --- a/net/atm/common.c +++ b/net/atm/common.c @@ -635,6 +635,7 @@ int vcc_sendmsg(struct socket *sock, struct msghdr *m, size_t size) skb->dev = NULL; /* for paths shared with net_device interfaces */ if (!copy_from_iter_full(skb_put(skb, size), size, &m->msg_iter)) { + atm_return_tx(vcc, skb); kfree_skb(skb); error = -EFAULT; goto out; diff --git a/net/atm/lec.c b/net/atm/lec.c index ded2f0df2ee6..afb8d3eb2185 100644 --- a/net/atm/lec.c +++ b/net/atm/lec.c @@ -124,6 +124,7 @@ static unsigned char bus_mac[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; /* Device structures */ static struct net_device *dev_lec[MAX_LEC_ITF]; +static DEFINE_MUTEX(lec_mutex); #if IS_ENABLED(CONFIG_BRIDGE) static void lec_handle_bridge(struct sk_buff *skb, struct net_device *dev) @@ -685,6 +686,7 @@ static int lec_vcc_attach(struct atm_vcc 
*vcc, void __user *arg) int bytes_left; struct atmlec_ioc ioc_data; + lockdep_assert_held(&lec_mutex); /* Lecd must be up in this case */ bytes_left = copy_from_user(&ioc_data, arg, sizeof(struct atmlec_ioc)); if (bytes_left != 0) @@ -710,6 +712,7 @@ static int lec_vcc_attach(struct atm_vcc *vcc, void __user *arg) static int lec_mcast_attach(struct atm_vcc *vcc, int arg) { + lockdep_assert_held(&lec_mutex); if (arg < 0 || arg >= MAX_LEC_ITF) return -EINVAL; arg = array_index_nospec(arg, MAX_LEC_ITF); @@ -725,6 +728,7 @@ static int lecd_attach(struct atm_vcc *vcc, int arg) int i; struct lec_priv *priv; + lockdep_assert_held(&lec_mutex); if (arg < 0) arg = 0; if (arg >= MAX_LEC_ITF) @@ -742,6 +746,7 @@ static int lecd_attach(struct atm_vcc *vcc, int arg) snprintf(dev_lec[i]->name, IFNAMSIZ, "lec%d", i); if (register_netdev(dev_lec[i])) { free_netdev(dev_lec[i]); + dev_lec[i] = NULL; return -EINVAL; } @@ -904,7 +909,6 @@ static void *lec_itf_walk(struct lec_state *state, loff_t *l) v = (dev && netdev_priv(dev)) ? lec_priv_walk(state, l, netdev_priv(dev)) : NULL; if (!v && dev) { - dev_put(dev); /* Partial state reset for the next time we get called */ dev = NULL; } @@ -928,6 +932,7 @@ static void *lec_seq_start(struct seq_file *seq, loff_t *pos) { struct lec_state *state = seq->private; + mutex_lock(&lec_mutex); state->itf = 0; state->dev = NULL; state->locked = NULL; @@ -945,8 +950,9 @@ static void lec_seq_stop(struct seq_file *seq, void *v) if (state->dev) { spin_unlock_irqrestore(&state->locked->lec_arp_lock, state->flags); - dev_put(state->dev); + state->dev = NULL; } + mutex_unlock(&lec_mutex); } static void *lec_seq_next(struct seq_file *seq, void *v, loff_t *pos) @@ -1003,6 +1009,7 @@ static int lane_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) return -ENOIOCTLCMD; } + mutex_lock(&lec_mutex); switch (cmd) { case ATMLEC_CTRL: err = lecd_attach(vcc, (int)arg); @@ -1017,6 +1024,7 @@ static int lane_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) break; } + mutex_unlock(&lec_mutex); return err; } @@ -1551,7 +1559,7 @@ static void lec_arp_expire_arp(struct timer_list *t) { struct lec_arp_table *entry; - entry = from_timer(entry, t, timer); + entry = timer_container_of(entry, t, timer); pr_debug("\n"); if (entry->status == ESI_ARP_PENDING) { @@ -1572,7 +1580,8 @@ static void lec_arp_expire_arp(struct timer_list *t) static void lec_arp_expire_vcc(struct timer_list *t) { unsigned long flags; - struct lec_arp_table *to_remove = from_timer(to_remove, t, timer); + struct lec_arp_table *to_remove = timer_container_of(to_remove, t, + timer); struct lec_priv *priv = to_remove->priv; timer_delete(&to_remove->timer); diff --git a/net/atm/raw.c b/net/atm/raw.c index 2b5f78a7ec3e..1e6511ec842c 100644 --- a/net/atm/raw.c +++ b/net/atm/raw.c @@ -36,7 +36,7 @@ static void atm_pop_raw(struct atm_vcc *vcc, struct sk_buff *skb) pr_debug("(%d) %d -= %d\n", vcc->vci, sk_wmem_alloc_get(sk), ATM_SKB(skb)->acct_truesize); - WARN_ON(refcount_sub_and_test(ATM_SKB(skb)->acct_truesize, &sk->sk_wmem_alloc)); + atm_return_tx(vcc, skb); dev_kfree_skb_any(skb); sk->sk_write_space(sk); } diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c index b790bb92ed1c..6ef8b2a57a9b 100644 --- a/net/ax25/af_ax25.c +++ b/net/ax25/af_ax25.c @@ -287,7 +287,7 @@ void ax25_destroy_socket(ax25_cb *); */ static void ax25_destroy_timer(struct timer_list *t) { - ax25_cb *ax25 = from_timer(ax25, t, dtimer); + ax25_cb *ax25 = timer_container_of(ax25, t, dtimer); struct sock *sk; sk=ax25->sk; diff --git 
a/net/ax25/ax25_ds_timer.c b/net/ax25/ax25_ds_timer.c index 8d9fba069001..0c9e7775aa54 100644 --- a/net/ax25/ax25_ds_timer.c +++ b/net/ax25/ax25_ds_timer.c @@ -64,7 +64,7 @@ void ax25_ds_set_timer(ax25_dev *ax25_dev) static void ax25_ds_timeout(struct timer_list *t) { - ax25_dev *ax25_dev = from_timer(ax25_dev, t, dama.slave_timer); + ax25_dev *ax25_dev = timer_container_of(ax25_dev, t, dama.slave_timer); ax25_cb *ax25; if (ax25_dev == NULL || !ax25_dev->dama.slave) diff --git a/net/ax25/ax25_timer.c b/net/ax25/ax25_timer.c index 3891a3923d6c..a69bfbc8b679 100644 --- a/net/ax25/ax25_timer.c +++ b/net/ax25/ax25_timer.c @@ -121,7 +121,7 @@ EXPORT_SYMBOL(ax25_display_timer); static void ax25_heartbeat_expiry(struct timer_list *t) { int proto = AX25_PROTO_STD_SIMPLEX; - ax25_cb *ax25 = from_timer(ax25, t, timer); + ax25_cb *ax25 = timer_container_of(ax25, t, timer); if (ax25->ax25_dev) proto = ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]; @@ -145,7 +145,7 @@ static void ax25_heartbeat_expiry(struct timer_list *t) static void ax25_t1timer_expiry(struct timer_list *t) { - ax25_cb *ax25 = from_timer(ax25, t, t1timer); + ax25_cb *ax25 = timer_container_of(ax25, t, t1timer); switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) { case AX25_PROTO_STD_SIMPLEX: @@ -164,7 +164,7 @@ static void ax25_t1timer_expiry(struct timer_list *t) static void ax25_t2timer_expiry(struct timer_list *t) { - ax25_cb *ax25 = from_timer(ax25, t, t2timer); + ax25_cb *ax25 = timer_container_of(ax25, t, t2timer); switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) { case AX25_PROTO_STD_SIMPLEX: @@ -183,7 +183,7 @@ static void ax25_t2timer_expiry(struct timer_list *t) static void ax25_t3timer_expiry(struct timer_list *t) { - ax25_cb *ax25 = from_timer(ax25, t, t3timer); + ax25_cb *ax25 = timer_container_of(ax25, t, t3timer); switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) { case AX25_PROTO_STD_SIMPLEX: @@ -204,7 +204,7 @@ static void ax25_t3timer_expiry(struct timer_list *t) static void ax25_idletimer_expiry(struct timer_list *t) { - ax25_cb *ax25 = from_timer(ax25, t, idletimer); + ax25_cb *ax25 = timer_container_of(ax25, t, idletimer); switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) { case AX25_PROTO_STD_SIMPLEX: diff --git a/net/batman-adv/tp_meter.c b/net/batman-adv/tp_meter.c index adbadb436033..350b149e48be 100644 --- a/net/batman-adv/tp_meter.c +++ b/net/batman-adv/tp_meter.c @@ -485,7 +485,7 @@ static void batadv_tp_reset_sender_timer(struct batadv_tp_vars *tp_vars) */ static void batadv_tp_sender_timeout(struct timer_list *t) { - struct batadv_tp_vars *tp_vars = from_timer(tp_vars, t, timer); + struct batadv_tp_vars *tp_vars = timer_container_of(tp_vars, t, timer); struct batadv_priv *bat_priv = tp_vars->bat_priv; if (atomic_read(&tp_vars->sending) == 0) @@ -1101,7 +1101,7 @@ static void batadv_tp_reset_receiver_timer(struct batadv_tp_vars *tp_vars) */ static void batadv_tp_receiver_shutdown(struct timer_list *t) { - struct batadv_tp_vars *tp_vars = from_timer(tp_vars, t, timer); + struct batadv_tp_vars *tp_vars = timer_container_of(tp_vars, t, timer); struct batadv_tp_unacked *un, *safe; struct batadv_priv *bat_priv; diff --git a/net/bluetooth/eir.c b/net/bluetooth/eir.c index 1bc51e2b05a3..3f72111ba651 100644 --- a/net/bluetooth/eir.c +++ b/net/bluetooth/eir.c @@ -242,7 +242,7 @@ u8 eir_create_per_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr) return ad_len; } -u8 eir_create_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr) +u8 eir_create_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr, 
u8 size) { struct adv_info *adv = NULL; u8 ad_len = 0, flags = 0; @@ -286,7 +286,7 @@ u8 eir_create_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr) /* If flags would still be empty, then there is no need to * include the "Flags" AD field". */ - if (flags) { + if (flags && (ad_len + eir_precalc_len(1) <= size)) { ptr[0] = 0x02; ptr[1] = EIR_FLAGS; ptr[2] = flags; @@ -316,7 +316,8 @@ skip_flags: } /* Provide Tx Power only if we can provide a valid value for it */ - if (adv_tx_power != HCI_TX_POWER_INVALID) { + if (adv_tx_power != HCI_TX_POWER_INVALID && + (ad_len + eir_precalc_len(1) <= size)) { ptr[0] = 0x02; ptr[1] = EIR_TX_POWER; ptr[2] = (u8)adv_tx_power; @@ -366,17 +367,19 @@ u8 eir_create_scan_rsp(struct hci_dev *hdev, u8 instance, u8 *ptr) void *eir_get_service_data(u8 *eir, size_t eir_len, u16 uuid, size_t *len) { - while ((eir = eir_get_data(eir, eir_len, EIR_SERVICE_DATA, len))) { + size_t dlen; + + while ((eir = eir_get_data(eir, eir_len, EIR_SERVICE_DATA, &dlen))) { u16 value = get_unaligned_le16(eir); if (uuid == value) { if (len) - *len -= 2; + *len = dlen - 2; return &eir[2]; } - eir += *len; - eir_len -= *len; + eir += dlen; + eir_len -= dlen; } return NULL; diff --git a/net/bluetooth/eir.h b/net/bluetooth/eir.h index 5c89a05e8b29..9372db83f912 100644 --- a/net/bluetooth/eir.h +++ b/net/bluetooth/eir.h @@ -9,7 +9,7 @@ void eir_create(struct hci_dev *hdev, u8 *data); -u8 eir_create_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr); +u8 eir_create_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr, u8 size); u8 eir_create_scan_rsp(struct hci_dev *hdev, u8 instance, u8 *ptr); u8 eir_create_per_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr); diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index 99efeed6a766..4f379184df5b 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c @@ -1501,8 +1501,8 @@ static int qos_set_bis(struct hci_dev *hdev, struct bt_iso_qos *qos) /* This function requires the caller holds hdev->lock */ static struct hci_conn *hci_add_bis(struct hci_dev *hdev, bdaddr_t *dst, - struct bt_iso_qos *qos, __u8 base_len, - __u8 *base) + __u8 sid, struct bt_iso_qos *qos, + __u8 base_len, __u8 *base) { struct hci_conn *conn; int err; @@ -1543,6 +1543,7 @@ static struct hci_conn *hci_add_bis(struct hci_dev *hdev, bdaddr_t *dst, return conn; conn->state = BT_CONNECT; + conn->sid = sid; hci_conn_hold(conn); return conn; @@ -2062,7 +2063,8 @@ static int create_big_sync(struct hci_dev *hdev, void *data) if (qos->bcast.bis) sync_interval = interval * 4; - err = hci_start_per_adv_sync(hdev, qos->bcast.bis, conn->le_per_adv_data_len, + err = hci_start_per_adv_sync(hdev, qos->bcast.bis, conn->sid, + conn->le_per_adv_data_len, conn->le_per_adv_data, flags, interval, interval, sync_interval); if (err) @@ -2134,7 +2136,7 @@ static void create_big_complete(struct hci_dev *hdev, void *data, int err) } } -struct hci_conn *hci_bind_bis(struct hci_dev *hdev, bdaddr_t *dst, +struct hci_conn *hci_bind_bis(struct hci_dev *hdev, bdaddr_t *dst, __u8 sid, struct bt_iso_qos *qos, __u8 base_len, __u8 *base) { @@ -2156,7 +2158,7 @@ struct hci_conn *hci_bind_bis(struct hci_dev *hdev, bdaddr_t *dst, base, base_len); /* We need hci_conn object using the BDADDR_ANY as dst */ - conn = hci_add_bis(hdev, dst, qos, base_len, eir); + conn = hci_add_bis(hdev, dst, sid, qos, base_len, eir); if (IS_ERR(conn)) return conn; @@ -2207,20 +2209,35 @@ static void bis_mark_per_adv(struct hci_conn *conn, void *data) } struct hci_conn *hci_connect_bis(struct hci_dev 
*hdev, bdaddr_t *dst, - __u8 dst_type, struct bt_iso_qos *qos, + __u8 dst_type, __u8 sid, + struct bt_iso_qos *qos, __u8 base_len, __u8 *base) { struct hci_conn *conn; int err; struct iso_list_data data; - conn = hci_bind_bis(hdev, dst, qos, base_len, base); + conn = hci_bind_bis(hdev, dst, sid, qos, base_len, base); if (IS_ERR(conn)) return conn; if (conn->state == BT_CONNECTED) return conn; + /* Check if SID needs to be allocated then search for the first + * available. + */ + if (conn->sid == HCI_SID_INVALID) { + u8 sid; + + for (sid = 0; sid <= 0x0f; sid++) { + if (!hci_find_adv_sid(hdev, sid)) { + conn->sid = sid; + break; + } + } + } + data.big = qos->bcast.big; data.bis = qos->bcast.bis; diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 3b49828160b7..07a8b4281a39 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -1585,6 +1585,19 @@ struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance) } /* This function requires the caller holds hdev->lock */ +struct adv_info *hci_find_adv_sid(struct hci_dev *hdev, u8 sid) +{ + struct adv_info *adv; + + list_for_each_entry(adv, &hdev->adv_instances, list) { + if (adv->sid == sid) + return adv; + } + + return NULL; +} + +/* This function requires the caller holds hdev->lock */ struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance) { struct adv_info *cur_instance; @@ -1736,7 +1749,7 @@ struct adv_info *hci_add_adv_instance(struct hci_dev *hdev, u8 instance, } /* This function requires the caller holds hdev->lock */ -struct adv_info *hci_add_per_instance(struct hci_dev *hdev, u8 instance, +struct adv_info *hci_add_per_instance(struct hci_dev *hdev, u8 instance, u8 sid, u32 flags, u8 data_len, u8 *data, u32 min_interval, u32 max_interval) { @@ -1748,6 +1761,7 @@ struct adv_info *hci_add_per_instance(struct hci_dev *hdev, u8 instance, if (IS_ERR(adv)) return adv; + adv->sid = sid; adv->periodic = true; adv->per_adv_data_len = data_len; @@ -1877,10 +1891,8 @@ void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor) if (monitor->handle) idr_remove(&hdev->adv_monitors_idr, monitor->handle); - if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) { + if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) hdev->adv_monitors_cnt--; - mgmt_adv_monitor_removed(hdev, monitor->handle); - } kfree(monitor); } @@ -2487,6 +2499,7 @@ struct hci_dev *hci_alloc_dev_priv(int sizeof_priv) mutex_init(&hdev->lock); mutex_init(&hdev->req_lock); + mutex_init(&hdev->mgmt_pending_lock); ida_init(&hdev->unset_handle_ida); @@ -3417,23 +3430,18 @@ static void hci_link_tx_to(struct hci_dev *hdev, __u8 type) bt_dev_err(hdev, "link tx timeout"); - rcu_read_lock(); + hci_dev_lock(hdev); /* Kill stalled connections */ - list_for_each_entry_rcu(c, &h->list, list) { + list_for_each_entry(c, &h->list, list) { if (c->type == type && c->sent) { bt_dev_err(hdev, "killing stalled connection %pMR", &c->dst); - /* hci_disconnect might sleep, so, we have to release - * the RCU read lock before calling it. 
- */ - rcu_read_unlock(); hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM); - rcu_read_lock(); } } - rcu_read_unlock(); + hci_dev_unlock(hdev); } static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type, diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c index 62d1ff951ebe..6687f2a4d1eb 100644 --- a/net/bluetooth/hci_sync.c +++ b/net/bluetooth/hci_sync.c @@ -1261,10 +1261,12 @@ int hci_setup_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance) hci_cpu_to_le24(adv->min_interval, cp.min_interval); hci_cpu_to_le24(adv->max_interval, cp.max_interval); cp.tx_power = adv->tx_power; + cp.sid = adv->sid; } else { hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval); hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval); cp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE; + cp.sid = 0x00; } secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK); @@ -1559,7 +1561,8 @@ static int hci_enable_per_advertising_sync(struct hci_dev *hdev, u8 instance) static int hci_adv_bcast_annoucement(struct hci_dev *hdev, struct adv_info *adv) { u8 bid[3]; - u8 ad[4 + 3]; + u8 ad[HCI_MAX_EXT_AD_LENGTH]; + u8 len; /* Skip if NULL adv as instance 0x00 is used for general purpose * advertising so it cannot used for the likes of Broadcast Announcement @@ -1585,14 +1588,16 @@ static int hci_adv_bcast_annoucement(struct hci_dev *hdev, struct adv_info *adv) /* Generate Broadcast ID */ get_random_bytes(bid, sizeof(bid)); - eir_append_service_data(ad, 0, 0x1852, bid, sizeof(bid)); - hci_set_adv_instance_data(hdev, adv->instance, sizeof(ad), ad, 0, NULL); + len = eir_append_service_data(ad, 0, 0x1852, bid, sizeof(bid)); + memcpy(ad + len, adv->adv_data, adv->adv_data_len); + hci_set_adv_instance_data(hdev, adv->instance, len + adv->adv_data_len, + ad, 0, NULL); return hci_update_adv_data_sync(hdev, adv->instance); } -int hci_start_per_adv_sync(struct hci_dev *hdev, u8 instance, u8 data_len, - u8 *data, u32 flags, u16 min_interval, +int hci_start_per_adv_sync(struct hci_dev *hdev, u8 instance, u8 sid, + u8 data_len, u8 *data, u32 flags, u16 min_interval, u16 max_interval, u16 sync_interval) { struct adv_info *adv = NULL; @@ -1603,9 +1608,28 @@ int hci_start_per_adv_sync(struct hci_dev *hdev, u8 instance, u8 data_len, if (instance) { adv = hci_find_adv_instance(hdev, instance); - /* Create an instance if that could not be found */ - if (!adv) { - adv = hci_add_per_instance(hdev, instance, flags, + if (adv) { + if (sid != HCI_SID_INVALID && adv->sid != sid) { + /* If the SID don't match attempt to find by + * SID. + */ + adv = hci_find_adv_sid(hdev, sid); + if (!adv) { + bt_dev_err(hdev, + "Unable to find adv_info"); + return -EINVAL; + } + } + + /* Turn it into periodic advertising */ + adv->periodic = true; + adv->per_adv_data_len = data_len; + if (data) + memcpy(adv->per_adv_data, data, data_len); + adv->flags = flags; + } else if (!adv) { + /* Create an instance if that could not be found */ + adv = hci_add_per_instance(hdev, instance, sid, flags, data_len, data, sync_interval, sync_interval); @@ -1812,7 +1836,8 @@ static int hci_set_ext_adv_data_sync(struct hci_dev *hdev, u8 instance) return 0; } - len = eir_create_adv_data(hdev, instance, pdu->data); + len = eir_create_adv_data(hdev, instance, pdu->data, + HCI_MAX_EXT_AD_LENGTH); pdu->length = len; pdu->handle = adv ? 
adv->handle : instance; @@ -1843,7 +1868,7 @@ static int hci_set_adv_data_sync(struct hci_dev *hdev, u8 instance) memset(&cp, 0, sizeof(cp)); - len = eir_create_adv_data(hdev, instance, cp.data); + len = eir_create_adv_data(hdev, instance, cp.data, sizeof(cp.data)); /* There's nothing to do if the data hasn't changed */ if (hdev->adv_data_len == len && diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c index fc5af8639b1e..6724adce615b 100644 --- a/net/bluetooth/hidp/core.c +++ b/net/bluetooth/hidp/core.c @@ -405,7 +405,7 @@ static int hidp_raw_request(struct hid_device *hid, unsigned char reportnum, static void hidp_idle_timeout(struct timer_list *t) { - struct hidp_session *session = from_timer(session, t, timer); + struct hidp_session *session = timer_container_of(session, t, timer); /* The HIDP user-space API only contains calls to add and remove * devices. There is no way to forward events of any kind. Therefore, diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c index 6e2c752aaa8f..3c2c98eecc62 100644 --- a/net/bluetooth/iso.c +++ b/net/bluetooth/iso.c @@ -336,7 +336,7 @@ static int iso_connect_bis(struct sock *sk) struct hci_dev *hdev; int err; - BT_DBG("%pMR", &iso_pi(sk)->src); + BT_DBG("%pMR (SID 0x%2.2x)", &iso_pi(sk)->src, iso_pi(sk)->bc_sid); hdev = hci_get_route(&iso_pi(sk)->dst, &iso_pi(sk)->src, iso_pi(sk)->src_type); @@ -365,7 +365,7 @@ static int iso_connect_bis(struct sock *sk) /* Just bind if DEFER_SETUP has been set */ if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) { - hcon = hci_bind_bis(hdev, &iso_pi(sk)->dst, + hcon = hci_bind_bis(hdev, &iso_pi(sk)->dst, iso_pi(sk)->bc_sid, &iso_pi(sk)->qos, iso_pi(sk)->base_len, iso_pi(sk)->base); if (IS_ERR(hcon)) { @@ -375,12 +375,16 @@ static int iso_connect_bis(struct sock *sk) } else { hcon = hci_connect_bis(hdev, &iso_pi(sk)->dst, le_addr_type(iso_pi(sk)->dst_type), - &iso_pi(sk)->qos, iso_pi(sk)->base_len, - iso_pi(sk)->base); + iso_pi(sk)->bc_sid, &iso_pi(sk)->qos, + iso_pi(sk)->base_len, iso_pi(sk)->base); if (IS_ERR(hcon)) { err = PTR_ERR(hcon); goto unlock; } + + /* Update SID if it was not set */ + if (iso_pi(sk)->bc_sid == HCI_SID_INVALID) + iso_pi(sk)->bc_sid = hcon->sid; } conn = iso_conn_add(hcon); @@ -1337,10 +1341,13 @@ static int iso_sock_getname(struct socket *sock, struct sockaddr *addr, addr->sa_family = AF_BLUETOOTH; if (peer) { + struct hci_conn *hcon = iso_pi(sk)->conn ? + iso_pi(sk)->conn->hcon : NULL; + bacpy(&sa->iso_bdaddr, &iso_pi(sk)->dst); sa->iso_bdaddr_type = iso_pi(sk)->dst_type; - if (test_bit(BT_SK_PA_SYNC, &iso_pi(sk)->flags)) { + if (hcon && hcon->type == BIS_LINK) { sa->iso_bc->bc_sid = iso_pi(sk)->bc_sid; sa->iso_bc->bc_num_bis = iso_pi(sk)->bc_num_bis; memcpy(sa->iso_bc->bc_bis, iso_pi(sk)->bc_bis, diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index 042d3ac3b4a3..a5bde5db58ef 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c @@ -4870,7 +4870,8 @@ static int l2cap_le_connect_req(struct l2cap_conn *conn, if (!smp_sufficient_security(conn->hcon, pchan->sec_level, SMP_ALLOW_STK)) { - result = L2CAP_CR_LE_AUTHENTICATION; + result = pchan->sec_level == BT_SECURITY_MEDIUM ? 
+ L2CAP_CR_LE_ENCRYPTION : L2CAP_CR_LE_AUTHENTICATION; chan = NULL; goto response_unlock; } diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index 261926dccc7e..d540f7b4f75f 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -1447,22 +1447,17 @@ static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data) send_settings_rsp(cmd->sk, cmd->opcode, match->hdev); - list_del(&cmd->list); - if (match->sk == NULL) { match->sk = cmd->sk; sock_hold(match->sk); } - - mgmt_pending_free(cmd); } static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data) { u8 *status = data; - mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status); - mgmt_pending_remove(cmd); + mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, *status); } static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data) @@ -1476,8 +1471,6 @@ static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data) if (cmd->cmd_complete) { cmd->cmd_complete(cmd, match->mgmt_status); - mgmt_pending_remove(cmd); - return; } @@ -1486,13 +1479,13 @@ static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data) static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status) { - return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, + return mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, status, cmd->param, cmd->param_len); } static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status) { - return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, + return mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, status, cmd->param, sizeof(struct mgmt_addr_info)); } @@ -1532,7 +1525,7 @@ static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data, if (err) { u8 mgmt_err = mgmt_status(err); - mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err); + mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err); hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE); goto done; } @@ -1707,7 +1700,7 @@ static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data, if (err) { u8 mgmt_err = mgmt_status(err); - mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err); + mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err); goto done; } @@ -1943,8 +1936,8 @@ static void set_ssp_complete(struct hci_dev *hdev, void *data, int err) new_settings(hdev, NULL); } - mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp, - &mgmt_err); + mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, true, + cmd_status_rsp, &mgmt_err); return; } @@ -1954,7 +1947,7 @@ static void set_ssp_complete(struct hci_dev *hdev, void *data, int err) changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED); } - mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match); + mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, true, settings_rsp, &match); if (changed) new_settings(hdev, match.sk); @@ -2074,12 +2067,12 @@ static void set_le_complete(struct hci_dev *hdev, void *data, int err) bt_dev_dbg(hdev, "err %d", err); if (status) { - mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp, - &status); + mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, true, cmd_status_rsp, + &status); return; } - mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match); + mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, true, settings_rsp, &match); new_settings(hdev, match.sk); @@ -2138,7 +2131,7 @@ static void set_mesh_complete(struct hci_dev *hdev, void *data, int err) struct sock *sk = cmd->sk; if (status) { - mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, 
hdev, + mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev, true, cmd_status_rsp, &status); return; } @@ -2566,7 +2559,8 @@ static int mgmt_hci_cmd_sync(struct sock *sk, struct hci_dev *hdev, struct mgmt_pending_cmd *cmd; int err; - if (len < sizeof(*cp)) + if (len != (offsetof(struct mgmt_cp_hci_cmd_sync, params) + + le16_to_cpu(cp->params_len))) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_HCI_CMD_SYNC, MGMT_STATUS_INVALID_PARAMS); @@ -2637,7 +2631,7 @@ static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err) bt_dev_dbg(hdev, "err %d", err); - mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, + mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_status(err), hdev->dev_class, 3); mgmt_pending_free(cmd); @@ -3426,7 +3420,7 @@ static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status) bacpy(&rp.addr.bdaddr, &conn->dst); rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type); - err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, + err = mgmt_cmd_complete(cmd->sk, cmd->hdev->id, MGMT_OP_PAIR_DEVICE, status, &rp, sizeof(rp)); /* So we don't get further callbacks for this connection */ @@ -5107,24 +5101,14 @@ static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev, mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk); } -void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle) +static void mgmt_adv_monitor_removed(struct sock *sk, struct hci_dev *hdev, + __le16 handle) { struct mgmt_ev_adv_monitor_removed ev; - struct mgmt_pending_cmd *cmd; - struct sock *sk_skip = NULL; - struct mgmt_cp_remove_adv_monitor *cp; - - cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev); - if (cmd) { - cp = cmd->param; - - if (cp->monitor_handle) - sk_skip = cmd->sk; - } - ev.monitor_handle = cpu_to_le16(handle); + ev.monitor_handle = handle; - mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip); + mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk); } static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev, @@ -5195,7 +5179,7 @@ static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev, hci_update_passive_scan(hdev); } - mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, + mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_status(status), &rp, sizeof(rp)); mgmt_pending_remove(cmd); @@ -5226,8 +5210,7 @@ static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev, if (pending_find(MGMT_OP_SET_LE, hdev) || pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) || - pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) || - pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) { + pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) { status = MGMT_STATUS_BUSY; goto unlock; } @@ -5397,8 +5380,7 @@ static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev, struct mgmt_pending_cmd *cmd = data; struct mgmt_cp_remove_adv_monitor *cp; - if (status == -ECANCELED || - cmd != pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) + if (status == -ECANCELED) return; hci_dev_lock(hdev); @@ -5407,12 +5389,14 @@ static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev, rp.monitor_handle = cp->monitor_handle; - if (!status) + if (!status) { + mgmt_adv_monitor_removed(cmd->sk, hdev, cp->monitor_handle); hci_update_passive_scan(hdev); + } - mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, + mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_status(status), &rp, sizeof(rp)); - mgmt_pending_remove(cmd); + mgmt_pending_free(cmd); 
hci_dev_unlock(hdev); bt_dev_dbg(hdev, "remove monitor %d complete, status %d", @@ -5422,10 +5406,6 @@ static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev, static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data) { struct mgmt_pending_cmd *cmd = data; - - if (cmd != pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) - return -ECANCELED; - struct mgmt_cp_remove_adv_monitor *cp = cmd->param; u16 handle = __le16_to_cpu(cp->monitor_handle); @@ -5444,14 +5424,13 @@ static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev, hci_dev_lock(hdev); if (pending_find(MGMT_OP_SET_LE, hdev) || - pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) || pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) || pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) { status = MGMT_STATUS_BUSY; goto unlock; } - cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len); + cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len); if (!cmd) { status = MGMT_STATUS_NO_RESOURCES; goto unlock; @@ -5461,7 +5440,7 @@ static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev, mgmt_remove_adv_monitor_complete); if (err) { - mgmt_pending_remove(cmd); + mgmt_pending_free(cmd); if (err == -ENOMEM) status = MGMT_STATUS_NO_RESOURCES; @@ -5791,7 +5770,7 @@ static void start_discovery_complete(struct hci_dev *hdev, void *data, int err) cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev)) return; - mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err), + mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_status(err), cmd->param, 1); mgmt_pending_remove(cmd); @@ -6012,7 +5991,7 @@ static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err) bt_dev_dbg(hdev, "err %d", err); - mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err), + mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_status(err), cmd->param, 1); mgmt_pending_remove(cmd); @@ -6237,7 +6216,7 @@ static void set_advertising_complete(struct hci_dev *hdev, void *data, int err) u8 status = mgmt_status(err); if (status) { - mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, + mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, true, cmd_status_rsp, &status); return; } @@ -6247,7 +6226,7 @@ static void set_advertising_complete(struct hci_dev *hdev, void *data, int err) else hci_dev_clear_flag(hdev, HCI_ADVERTISING); - mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp, + mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, true, settings_rsp, &match); new_settings(hdev, match.sk); @@ -6591,7 +6570,7 @@ static void set_bredr_complete(struct hci_dev *hdev, void *data, int err) */ hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED); - mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err); + mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err); } else { send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev); new_settings(hdev, cmd->sk); @@ -6728,7 +6707,7 @@ static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err) if (err) { u8 mgmt_err = mgmt_status(err); - mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err); + mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err); goto done; } @@ -7175,7 +7154,7 @@ static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err) rp.max_tx_power = HCI_TX_POWER_INVALID; } - mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status, + mgmt_cmd_complete(cmd->sk, cmd->hdev->id, MGMT_OP_GET_CONN_INFO, status, &rp, 
sizeof(rp)); mgmt_pending_free(cmd); @@ -7335,7 +7314,7 @@ static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err) } complete: - mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp, + mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, status, &rp, sizeof(rp)); mgmt_pending_free(cmd); @@ -8585,10 +8564,10 @@ static void add_advertising_complete(struct hci_dev *hdev, void *data, int err) rp.instance = cp->instance; if (err) - mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, + mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_status(err)); else - mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, + mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_status(err), &rp, sizeof(rp)); add_adv_complete(hdev, cmd->sk, cp->instance, err); @@ -8776,10 +8755,10 @@ static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data, hci_remove_adv_instance(hdev, cp->instance); - mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, + mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_status(err)); } else { - mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, + mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_status(err), &rp, sizeof(rp)); } @@ -8926,10 +8905,10 @@ static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err) rp.instance = cp->instance; if (err) - mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, + mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_status(err)); else - mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, + mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_status(err), &rp, sizeof(rp)); mgmt_pending_free(cmd); @@ -9088,10 +9067,10 @@ static void remove_advertising_complete(struct hci_dev *hdev, void *data, rp.instance = cp->instance; if (err) - mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, + mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_status(err)); else - mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, + mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, MGMT_STATUS_SUCCESS, &rp, sizeof(rp)); mgmt_pending_free(cmd); @@ -9363,7 +9342,7 @@ void mgmt_index_removed(struct hci_dev *hdev) if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) return; - mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &match); + mgmt_pending_foreach(0, hdev, true, cmd_complete_rsp, &match); if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) { mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0, @@ -9401,7 +9380,8 @@ void mgmt_power_on(struct hci_dev *hdev, int err) hci_update_passive_scan(hdev); } - mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match); + mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, true, settings_rsp, + &match); new_settings(hdev, match.sk); @@ -9416,7 +9396,8 @@ void __mgmt_power_off(struct hci_dev *hdev) struct cmd_lookup match = { NULL, hdev }; u8 zero_cod[] = { 0, 0, 0 }; - mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match); + mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, true, settings_rsp, + &match); /* If the power off is because of hdev unregistration let * use the appropriate INVALID_INDEX status. 
Otherwise use @@ -9430,7 +9411,7 @@ void __mgmt_power_off(struct hci_dev *hdev) else match.mgmt_status = MGMT_STATUS_NOT_POWERED; - mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &match); + mgmt_pending_foreach(0, hdev, true, cmd_complete_rsp, &match); if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) { mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, @@ -9671,7 +9652,6 @@ static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data) device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk); cmd->cmd_complete(cmd, 0); - mgmt_pending_remove(cmd); } bool mgmt_powering_down(struct hci_dev *hdev) @@ -9727,8 +9707,8 @@ void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, struct mgmt_cp_disconnect *cp; struct mgmt_pending_cmd *cmd; - mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp, - hdev); + mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, true, + unpair_device_rsp, hdev); cmd = pending_find(MGMT_OP_DISCONNECT, hdev); if (!cmd) @@ -9921,7 +9901,7 @@ void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status) if (status) { u8 mgmt_err = mgmt_status(status); - mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, + mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, true, cmd_status_rsp, &mgmt_err); return; } @@ -9931,8 +9911,8 @@ void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status) else changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY); - mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp, - &match); + mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, true, + settings_rsp, &match); if (changed) new_settings(hdev, match.sk); @@ -9956,9 +9936,12 @@ void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class, { struct cmd_lookup match = { NULL, hdev, mgmt_status(status) }; - mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match); - mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match); - mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match); + mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, false, sk_lookup, + &match); + mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, false, sk_lookup, + &match); + mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, false, sk_lookup, + &match); if (!status) { mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, diff --git a/net/bluetooth/mgmt_util.c b/net/bluetooth/mgmt_util.c index 3713ff490c65..a88a07da3947 100644 --- a/net/bluetooth/mgmt_util.c +++ b/net/bluetooth/mgmt_util.c @@ -217,30 +217,47 @@ int mgmt_cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status, struct mgmt_pending_cmd *mgmt_pending_find(unsigned short channel, u16 opcode, struct hci_dev *hdev) { - struct mgmt_pending_cmd *cmd; + struct mgmt_pending_cmd *cmd, *tmp; + + mutex_lock(&hdev->mgmt_pending_lock); - list_for_each_entry(cmd, &hdev->mgmt_pending, list) { + list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) { if (hci_sock_get_channel(cmd->sk) != channel) continue; - if (cmd->opcode == opcode) + + if (cmd->opcode == opcode) { + mutex_unlock(&hdev->mgmt_pending_lock); return cmd; + } } + mutex_unlock(&hdev->mgmt_pending_lock); + return NULL; } -void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev, +void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev, bool remove, void (*cb)(struct mgmt_pending_cmd *cmd, void *data), void *data) { struct mgmt_pending_cmd *cmd, *tmp; + mutex_lock(&hdev->mgmt_pending_lock); + list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) { if (opcode > 0 && 
cmd->opcode != opcode) continue; + if (remove) + list_del(&cmd->list); + cb(cmd, data); + + if (remove) + mgmt_pending_free(cmd); } + + mutex_unlock(&hdev->mgmt_pending_lock); } struct mgmt_pending_cmd *mgmt_pending_new(struct sock *sk, u16 opcode, @@ -254,7 +271,7 @@ struct mgmt_pending_cmd *mgmt_pending_new(struct sock *sk, u16 opcode, return NULL; cmd->opcode = opcode; - cmd->index = hdev->id; + cmd->hdev = hdev; cmd->param = kmemdup(data, len, GFP_KERNEL); if (!cmd->param) { @@ -280,7 +297,9 @@ struct mgmt_pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode, if (!cmd) return NULL; + mutex_lock(&hdev->mgmt_pending_lock); list_add_tail(&cmd->list, &hdev->mgmt_pending); + mutex_unlock(&hdev->mgmt_pending_lock); return cmd; } @@ -294,7 +313,10 @@ void mgmt_pending_free(struct mgmt_pending_cmd *cmd) void mgmt_pending_remove(struct mgmt_pending_cmd *cmd) { + mutex_lock(&cmd->hdev->mgmt_pending_lock); list_del(&cmd->list); + mutex_unlock(&cmd->hdev->mgmt_pending_lock); + mgmt_pending_free(cmd); } diff --git a/net/bluetooth/mgmt_util.h b/net/bluetooth/mgmt_util.h index f2ba994ab1d8..024e51dd6937 100644 --- a/net/bluetooth/mgmt_util.h +++ b/net/bluetooth/mgmt_util.h @@ -33,7 +33,7 @@ struct mgmt_mesh_tx { struct mgmt_pending_cmd { struct list_head list; u16 opcode; - int index; + struct hci_dev *hdev; void *param; size_t param_len; struct sock *sk; @@ -54,7 +54,7 @@ int mgmt_cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status, struct mgmt_pending_cmd *mgmt_pending_find(unsigned short channel, u16 opcode, struct hci_dev *hdev); -void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev, +void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev, bool remove, void (*cb)(struct mgmt_pending_cmd *cmd, void *data), void *data); struct mgmt_pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode, diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c index 20ea7dba0a9a..3b8f39618d65 100644 --- a/net/bluetooth/rfcomm/core.c +++ b/net/bluetooth/rfcomm/core.c @@ -235,7 +235,7 @@ static int rfcomm_check_security(struct rfcomm_dlc *d) static void rfcomm_session_timeout(struct timer_list *t) { - struct rfcomm_session *s = from_timer(s, t, timer); + struct rfcomm_session *s = timer_container_of(s, t, timer); BT_DBG("session %p state %ld", s, s->state); @@ -260,7 +260,7 @@ static void rfcomm_session_clear_timer(struct rfcomm_session *s) /* ---- RFCOMM DLCs ---- */ static void rfcomm_dlc_timeout(struct timer_list *t) { - struct rfcomm_dlc *d = from_timer(d, t, timer); + struct rfcomm_dlc *d = timer_container_of(d, t, timer); BT_DBG("dlc %p state %ld", d, d->state); diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c index 7cb192cbd65f..aaf13a7d58ed 100644 --- a/net/bpf/test_run.c +++ b/net/bpf/test_run.c @@ -569,6 +569,11 @@ __bpf_kfunc u32 bpf_fentry_test9(u32 *a) return *a; } +int noinline bpf_fentry_test10(const void *a) +{ + return (long)a; +} + void noinline bpf_fentry_test_sinfo(struct skb_shared_info *sinfo) { } @@ -699,7 +704,8 @@ int bpf_prog_test_run_tracing(struct bpf_prog *prog, bpf_fentry_test6(16, (void *)17, 18, 19, (void *)20, 21) != 111 || bpf_fentry_test7((struct bpf_fentry_test_t *)0) != 0 || bpf_fentry_test8(&arg) != 0 || - bpf_fentry_test9(&retval) != 0) + bpf_fentry_test9(&retval) != 0 || + bpf_fentry_test10((void *)0) != 0) goto out; break; case BPF_MODIFY_RETURN: diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index fb6f7f2001c9..0224ef3dfec0 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c @@ -648,7 +648,7 @@ 
static void br_multicast_del_mdb_entry(struct net_bridge_mdb_entry *mp) static void br_multicast_group_expired(struct timer_list *t) { - struct net_bridge_mdb_entry *mp = from_timer(mp, t, timer); + struct net_bridge_mdb_entry *mp = timer_container_of(mp, t, timer); struct net_bridge *br = mp->br; spin_lock(&br->multicast_lock); @@ -856,7 +856,7 @@ static void br_multicast_find_del_pg(struct net_bridge *br, static void br_multicast_port_group_expired(struct timer_list *t) { - struct net_bridge_port_group *pg = from_timer(pg, t, timer); + struct net_bridge_port_group *pg = timer_container_of(pg, t, timer); struct net_bridge_group_src *src_ent; struct net_bridge *br = pg->key.port->br; struct hlist_node *tmp; @@ -1314,7 +1314,7 @@ struct net_bridge_mdb_entry *br_multicast_new_group(struct net_bridge *br, static void br_multicast_group_src_expired(struct timer_list *t) { - struct net_bridge_group_src *src = from_timer(src, t, timer); + struct net_bridge_group_src *src = timer_container_of(src, t, timer); struct net_bridge_port_group *pg; struct net_bridge *br = src->br; @@ -1667,8 +1667,8 @@ out: static void br_ip4_multicast_router_expired(struct timer_list *t) { - struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t, - ip4_mc_router_timer); + struct net_bridge_mcast_port *pmctx = timer_container_of(pmctx, t, + ip4_mc_router_timer); br_multicast_router_expired(pmctx, t, &pmctx->ip4_rlist); } @@ -1676,8 +1676,8 @@ static void br_ip4_multicast_router_expired(struct timer_list *t) #if IS_ENABLED(CONFIG_IPV6) static void br_ip6_multicast_router_expired(struct timer_list *t) { - struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t, - ip6_mc_router_timer); + struct net_bridge_mcast_port *pmctx = timer_container_of(pmctx, t, + ip6_mc_router_timer); br_multicast_router_expired(pmctx, t, &pmctx->ip6_rlist); } @@ -1713,8 +1713,8 @@ out: static void br_ip4_multicast_local_router_expired(struct timer_list *t) { - struct net_bridge_mcast *brmctx = from_timer(brmctx, t, - ip4_mc_router_timer); + struct net_bridge_mcast *brmctx = timer_container_of(brmctx, t, + ip4_mc_router_timer); br_multicast_local_router_expired(brmctx, t); } @@ -1722,8 +1722,8 @@ static void br_ip4_multicast_local_router_expired(struct timer_list *t) #if IS_ENABLED(CONFIG_IPV6) static void br_ip6_multicast_local_router_expired(struct timer_list *t) { - struct net_bridge_mcast *brmctx = from_timer(brmctx, t, - ip6_mc_router_timer); + struct net_bridge_mcast *brmctx = timer_container_of(brmctx, t, + ip6_mc_router_timer); br_multicast_local_router_expired(brmctx, t); } @@ -1746,8 +1746,8 @@ out: static void br_ip4_multicast_querier_expired(struct timer_list *t) { - struct net_bridge_mcast *brmctx = from_timer(brmctx, t, - ip4_other_query.timer); + struct net_bridge_mcast *brmctx = timer_container_of(brmctx, t, + ip4_other_query.timer); br_multicast_querier_expired(brmctx, &brmctx->ip4_own_query); } @@ -1755,8 +1755,8 @@ static void br_ip4_multicast_querier_expired(struct timer_list *t) #if IS_ENABLED(CONFIG_IPV6) static void br_ip6_multicast_querier_expired(struct timer_list *t) { - struct net_bridge_mcast *brmctx = from_timer(brmctx, t, - ip6_other_query.timer); + struct net_bridge_mcast *brmctx = timer_container_of(brmctx, t, + ip6_other_query.timer); br_multicast_querier_expired(brmctx, &brmctx->ip6_own_query); } @@ -1918,8 +1918,8 @@ out: static void br_ip4_multicast_port_query_expired(struct timer_list *t) { - struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t, - ip4_own_query.timer); + struct net_bridge_mcast_port 
*pmctx = timer_container_of(pmctx, t, + ip4_own_query.timer); br_multicast_port_query_expired(pmctx, &pmctx->ip4_own_query); } @@ -1927,8 +1927,8 @@ static void br_ip4_multicast_port_query_expired(struct timer_list *t) #if IS_ENABLED(CONFIG_IPV6) static void br_ip6_multicast_port_query_expired(struct timer_list *t) { - struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t, - ip6_own_query.timer); + struct net_bridge_mcast_port *pmctx = timer_container_of(pmctx, t, + ip6_own_query.timer); br_multicast_port_query_expired(pmctx, &pmctx->ip6_own_query); } @@ -1936,7 +1936,8 @@ static void br_ip6_multicast_port_query_expired(struct timer_list *t) static void br_multicast_port_group_rexmit(struct timer_list *t) { - struct net_bridge_port_group *pg = from_timer(pg, t, rexmit_timer); + struct net_bridge_port_group *pg = timer_container_of(pg, t, + rexmit_timer); struct bridge_mcast_other_query *other_query = NULL; struct net_bridge *br = pg->key.port->br; struct net_bridge_mcast_port *pmctx; @@ -4056,8 +4057,8 @@ out: static void br_ip4_multicast_query_expired(struct timer_list *t) { - struct net_bridge_mcast *brmctx = from_timer(brmctx, t, - ip4_own_query.timer); + struct net_bridge_mcast *brmctx = timer_container_of(brmctx, t, + ip4_own_query.timer); br_multicast_query_expired(brmctx, &brmctx->ip4_own_query, &brmctx->ip4_querier); @@ -4066,8 +4067,8 @@ static void br_ip4_multicast_query_expired(struct timer_list *t) #if IS_ENABLED(CONFIG_IPV6) static void br_ip6_multicast_query_expired(struct timer_list *t) { - struct net_bridge_mcast *brmctx = from_timer(brmctx, t, - ip6_own_query.timer); + struct net_bridge_mcast *brmctx = timer_container_of(brmctx, t, + ip6_own_query.timer); br_multicast_query_expired(brmctx, &brmctx->ip6_own_query, &brmctx->ip6_querier); diff --git a/net/bridge/br_multicast_eht.c b/net/bridge/br_multicast_eht.c index c126aa4e7551..adfd74102019 100644 --- a/net/bridge/br_multicast_eht.c +++ b/net/bridge/br_multicast_eht.c @@ -207,7 +207,9 @@ void br_multicast_eht_clean_sets(struct net_bridge_port_group *pg) static void br_multicast_eht_set_entry_expired(struct timer_list *t) { - struct net_bridge_group_eht_set_entry *set_h = from_timer(set_h, t, timer); + struct net_bridge_group_eht_set_entry *set_h = timer_container_of(set_h, + t, + timer); struct net_bridge *br = set_h->br; spin_lock(&br->multicast_lock); @@ -223,8 +225,9 @@ out: static void br_multicast_eht_set_expired(struct timer_list *t) { - struct net_bridge_group_eht_set *eht_set = from_timer(eht_set, t, - timer); + struct net_bridge_group_eht_set *eht_set = timer_container_of(eht_set, + t, + timer); struct net_bridge *br = eht_set->br; spin_lock(&br->multicast_lock); diff --git a/net/bridge/br_stp_timer.c b/net/bridge/br_stp_timer.c index 27bf1979b909..e5d453305381 100644 --- a/net/bridge/br_stp_timer.c +++ b/net/bridge/br_stp_timer.c @@ -29,7 +29,7 @@ static int br_is_designated_for_some_port(const struct net_bridge *br) static void br_hello_timer_expired(struct timer_list *t) { - struct net_bridge *br = from_timer(br, t, hello_timer); + struct net_bridge *br = timer_container_of(br, t, hello_timer); br_debug(br, "hello timer expired\n"); spin_lock(&br->lock); @@ -45,7 +45,8 @@ static void br_hello_timer_expired(struct timer_list *t) static void br_message_age_timer_expired(struct timer_list *t) { - struct net_bridge_port *p = from_timer(p, t, message_age_timer); + struct net_bridge_port *p = timer_container_of(p, t, + message_age_timer); struct net_bridge *br = p->br; const bridge_id *id = 
&p->designated_bridge; int was_root; @@ -78,7 +79,8 @@ static void br_message_age_timer_expired(struct timer_list *t) static void br_forward_delay_timer_expired(struct timer_list *t) { - struct net_bridge_port *p = from_timer(p, t, forward_delay_timer); + struct net_bridge_port *p = timer_container_of(p, t, + forward_delay_timer); struct net_bridge *br = p->br; br_debug(br, "port %u(%s) forward delay timer\n", @@ -102,7 +104,7 @@ static void br_forward_delay_timer_expired(struct timer_list *t) static void br_tcn_timer_expired(struct timer_list *t) { - struct net_bridge *br = from_timer(br, t, tcn_timer); + struct net_bridge *br = timer_container_of(br, t, tcn_timer); br_debug(br, "tcn timer expired\n"); spin_lock(&br->lock); @@ -116,7 +118,8 @@ static void br_tcn_timer_expired(struct timer_list *t) static void br_topology_change_timer_expired(struct timer_list *t) { - struct net_bridge *br = from_timer(br, t, topology_change_timer); + struct net_bridge *br = timer_container_of(br, t, + topology_change_timer); br_debug(br, "topo change timer expired\n"); spin_lock(&br->lock); @@ -127,7 +130,7 @@ static void br_topology_change_timer_expired(struct timer_list *t) static void br_hold_timer_expired(struct timer_list *t) { - struct net_bridge_port *p = from_timer(p, t, hold_timer); + struct net_bridge_port *p = timer_container_of(p, t, hold_timer); br_debug(p->br, "port %u(%s) hold timer expired\n", (unsigned int) p->port_no, p->dev->name); diff --git a/net/can/af_can.c b/net/can/af_can.c index 4aab7033c933..b2387a46794a 100644 --- a/net/can/af_can.c +++ b/net/can/af_can.c @@ -683,7 +683,7 @@ static int can_rcv(struct sk_buff *skb, struct net_device *dev, pr_warn_once("PF_CAN: dropped non conform CAN skbuff: dev type %d, len %d\n", dev->type, skb->len); - kfree_skb(skb); + kfree_skb_reason(skb, SKB_DROP_REASON_CAN_RX_INVALID_FRAME); return NET_RX_DROP; } @@ -698,7 +698,7 @@ static int canfd_rcv(struct sk_buff *skb, struct net_device *dev, pr_warn_once("PF_CAN: dropped non conform CAN FD skbuff: dev type %d, len %d\n", dev->type, skb->len); - kfree_skb(skb); + kfree_skb_reason(skb, SKB_DROP_REASON_CANFD_RX_INVALID_FRAME); return NET_RX_DROP; } @@ -713,7 +713,7 @@ static int canxl_rcv(struct sk_buff *skb, struct net_device *dev, pr_warn_once("PF_CAN: dropped non conform CAN XL skbuff: dev type %d, len %d\n", dev->type, skb->len); - kfree_skb(skb); + kfree_skb_reason(skb, SKB_DROP_REASON_CANXL_RX_INVALID_FRAME); return NET_RX_DROP; } diff --git a/net/can/bcm.c b/net/can/bcm.c index 6bc1cc4c94c5..5e690a2377e4 100644 --- a/net/can/bcm.c +++ b/net/can/bcm.c @@ -359,6 +359,7 @@ static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head, unsigned int datalen = head->nframes * op->cfsiz; int err; unsigned int *pflags; + enum skb_drop_reason reason; skb = alloc_skb(sizeof(*head) + datalen, gfp_any()); if (!skb) @@ -413,11 +414,11 @@ static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head, addr->can_family = AF_CAN; addr->can_ifindex = op->rx_ifindex; - err = sock_queue_rcv_skb(sk, skb); + err = sock_queue_rcv_skb_reason(sk, skb, &reason); if (err < 0) { struct bcm_sock *bo = bcm_sk(sk); - kfree_skb(skb); + sk_skb_reason_drop(sk, skb, reason); /* don't care about overflows in this statistic */ bo->dropped_usr_msgs++; } diff --git a/net/can/isotp.c b/net/can/isotp.c index 1efa377f002e..dee1412b3c9c 100644 --- a/net/can/isotp.c +++ b/net/can/isotp.c @@ -278,6 +278,7 @@ static int isotp_send_fc(struct sock *sk, int ae, u8 flowstatus) static void isotp_rcv_skb(struct sk_buff 
*skb, struct sock *sk) { struct sockaddr_can *addr = (struct sockaddr_can *)skb->cb; + enum skb_drop_reason reason; BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct sockaddr_can)); @@ -285,8 +286,8 @@ static void isotp_rcv_skb(struct sk_buff *skb, struct sock *sk) addr->can_family = AF_CAN; addr->can_ifindex = skb->dev->ifindex; - if (sock_queue_rcv_skb(sk, skb) < 0) - kfree_skb(skb); + if (sock_queue_rcv_skb_reason(sk, skb, &reason) < 0) + sk_skb_reason_drop(sk, skb, reason); } static u8 padlen(u8 datalen) diff --git a/net/can/j1939/socket.c b/net/can/j1939/socket.c index 6fefe7a68761..3d8b588822f9 100644 --- a/net/can/j1939/socket.c +++ b/net/can/j1939/socket.c @@ -311,6 +311,7 @@ static void j1939_sk_recv_one(struct j1939_sock *jsk, struct sk_buff *oskb) { const struct j1939_sk_buff_cb *oskcb = j1939_skb_to_cb(oskb); struct j1939_sk_buff_cb *skcb; + enum skb_drop_reason reason; struct sk_buff *skb; if (oskb->sk == &jsk->sk) @@ -331,8 +332,8 @@ static void j1939_sk_recv_one(struct j1939_sock *jsk, struct sk_buff *oskb) if (skb->sk) skcb->msg_flags |= MSG_DONTROUTE; - if (sock_queue_rcv_skb(&jsk->sk, skb) < 0) - kfree_skb(skb); + if (sock_queue_rcv_skb_reason(&jsk->sk, skb, &reason) < 0) + sk_skb_reason_drop(&jsk->sk, skb, reason); } bool j1939_sk_recv_match(struct j1939_priv *priv, struct j1939_sk_buff_cb *skcb) diff --git a/net/can/proc.c b/net/can/proc.c index 25fdf060e30d..0938bf7dd646 100644 --- a/net/can/proc.c +++ b/net/can/proc.c @@ -114,7 +114,7 @@ static unsigned long calc_rate(unsigned long oldjif, unsigned long newjif, void can_stat_update(struct timer_list *t) { - struct net *net = from_timer(net, t, can.stattimer); + struct net *net = timer_container_of(net, t, can.stattimer); struct can_pkg_stats *pkg_stats = net->can.pkg_stats; unsigned long j = jiffies; /* snapshot */ diff --git a/net/can/raw.c b/net/can/raw.c index 020f21430b1d..76b867d21def 100644 --- a/net/can/raw.c +++ b/net/can/raw.c @@ -129,6 +129,7 @@ static void raw_rcv(struct sk_buff *oskb, void *data) { struct sock *sk = (struct sock *)data; struct raw_sock *ro = raw_sk(sk); + enum skb_drop_reason reason; struct sockaddr_can *addr; struct sk_buff *skb; unsigned int *pflags; @@ -205,8 +206,8 @@ static void raw_rcv(struct sk_buff *oskb, void *data) if (oskb->sk == sk) *pflags |= MSG_CONFIRM; - if (sock_queue_rcv_skb(sk, skb) < 0) - kfree_skb(skb); + if (sock_queue_rcv_skb_reason(sk, skb, &reason) < 0) + sk_skb_reason_drop(sk, skb, reason); } static int raw_enable_filters(struct net *net, struct net_device *dev, diff --git a/net/core/dev.c b/net/core/dev.c index 2b514d95c528..df1678b1fe24 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -3179,7 +3179,6 @@ int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq) if (dev->reg_state == NETREG_REGISTERED || dev->reg_state == NETREG_UNREGISTERING) { - ASSERT_RTNL(); netdev_ops_assert_locked(dev); rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues, @@ -3229,7 +3228,6 @@ int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq) return -EINVAL; if (dev->reg_state == NETREG_REGISTERED) { - ASSERT_RTNL(); netdev_ops_assert_locked(dev); rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues, @@ -6926,6 +6924,43 @@ static enum hrtimer_restart napi_watchdog(struct hrtimer *timer) return HRTIMER_NORESTART; } +static void napi_stop_kthread(struct napi_struct *napi) +{ + unsigned long val, new; + + /* Wait until the napi STATE_THREADED is unset. 
*/ + while (true) { + val = READ_ONCE(napi->state); + + /* If napi kthread own this napi or the napi is idle, + * STATE_THREADED can be unset here. + */ + if ((val & NAPIF_STATE_SCHED_THREADED) || + !(val & NAPIF_STATE_SCHED)) { + new = val & (~NAPIF_STATE_THREADED); + } else { + msleep(20); + continue; + } + + if (try_cmpxchg(&napi->state, &val, new)) + break; + } + + /* Once STATE_THREADED is unset, wait for SCHED_THREADED to be unset by + * the kthread. + */ + while (true) { + if (!test_bit(NAPIF_STATE_SCHED_THREADED, &napi->state)) + break; + + msleep(20); + } + + kthread_stop(napi->thread); + napi->thread = NULL; +} + int dev_set_threaded(struct net_device *dev, bool threaded) { struct napi_struct *napi; @@ -6961,8 +6996,12 @@ int dev_set_threaded(struct net_device *dev, bool threaded) * softirq mode will happen in the next round of napi_schedule(). * This should not cause hiccups/stalls to the live traffic. */ - list_for_each_entry(napi, &dev->napi_list, dev_list) - assign_bit(NAPI_STATE_THREADED, &napi->state, threaded); + list_for_each_entry(napi, &dev->napi_list, dev_list) { + if (!threaded && napi->thread) + napi_stop_kthread(napi); + else + assign_bit(NAPI_STATE_THREADED, &napi->state, threaded); + } return err; } @@ -9968,6 +10007,7 @@ int netif_xdp_propagate(struct net_device *dev, struct netdev_bpf *bpf) return dev->netdev_ops->ndo_bpf(dev, bpf); } +EXPORT_SYMBOL_GPL(netif_xdp_propagate); u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode) { @@ -10498,7 +10538,7 @@ static void dev_index_release(struct net *net, int ifindex) static bool from_cleanup_net(void) { #ifdef CONFIG_NET_NS - return current == cleanup_net_task; + return current == READ_ONCE(cleanup_net_task); #else return false; #endif @@ -10729,12 +10769,14 @@ sync_lower: * *before* calling udp_tunnel_get_rx_info, * but *after* calling udp_tunnel_drop_rx_info. 
*/ + udp_tunnel_nic_lock(dev); if (features & NETIF_F_RX_UDP_TUNNEL_PORT) { dev->features = features; udp_tunnel_get_rx_info(dev); } else { udp_tunnel_drop_rx_info(dev); } + udp_tunnel_nic_unlock(dev); } if (diff & NETIF_F_HW_VLAN_CTAG_FILTER) { diff --git a/net/core/devmem.h b/net/core/devmem.h index e7ba77050b8f..0a3b28ba5c13 100644 --- a/net/core/devmem.h +++ b/net/core/devmem.h @@ -170,8 +170,9 @@ static inline void __net_devmem_dmabuf_binding_free(struct work_struct *wq) } static inline struct net_devmem_dmabuf_binding * -net_devmem_bind_dmabuf(struct net_device *dev, unsigned int dmabuf_fd, +net_devmem_bind_dmabuf(struct net_device *dev, enum dma_data_direction direction, + unsigned int dmabuf_fd, struct netdev_nl_sock *priv, struct netlink_ext_ack *extack) { diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c index 8a7ce640f74d..60d31c2feed3 100644 --- a/net/core/drop_monitor.c +++ b/net/core/drop_monitor.c @@ -208,7 +208,7 @@ static void send_dm_alert(struct work_struct *work) */ static void sched_send_work(struct timer_list *t) { - struct per_cpu_dm_data *data = from_timer(data, t, send_timer); + struct per_cpu_dm_data *data = timer_container_of(data, t, send_timer); schedule_work(&data->dm_alert_work); } diff --git a/net/core/filter.c b/net/core/filter.c index 577a4504e26f..7a72f766aacf 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -1968,10 +1968,11 @@ BPF_CALL_5(bpf_l4_csum_replace, struct sk_buff *, skb, u32, offset, bool is_pseudo = flags & BPF_F_PSEUDO_HDR; bool is_mmzero = flags & BPF_F_MARK_MANGLED_0; bool do_mforce = flags & BPF_F_MARK_ENFORCE; + bool is_ipv6 = flags & BPF_F_IPV6; __sum16 *ptr; if (unlikely(flags & ~(BPF_F_MARK_MANGLED_0 | BPF_F_MARK_ENFORCE | - BPF_F_PSEUDO_HDR | BPF_F_HDR_FIELD_MASK))) + BPF_F_PSEUDO_HDR | BPF_F_HDR_FIELD_MASK | BPF_F_IPV6))) return -EINVAL; if (unlikely(offset > 0xffff || offset & 1)) return -EFAULT; @@ -1987,7 +1988,7 @@ BPF_CALL_5(bpf_l4_csum_replace, struct sk_buff *, skb, u32, offset, if (unlikely(from != 0)) return -EINVAL; - inet_proto_csum_replace_by_diff(ptr, skb, to, is_pseudo); + inet_proto_csum_replace_by_diff(ptr, skb, to, is_pseudo, is_ipv6); break; case 2: inet_proto_csum_replace2(ptr, skb, from, to, is_pseudo); @@ -3232,6 +3233,13 @@ static const struct bpf_func_proto bpf_skb_vlan_pop_proto = { .arg1_type = ARG_PTR_TO_CTX, }; +static void bpf_skb_change_protocol(struct sk_buff *skb, u16 proto) +{ + skb->protocol = htons(proto); + if (skb_valid_dst(skb)) + skb_dst_drop(skb); +} + static int bpf_skb_generic_push(struct sk_buff *skb, u32 off, u32 len) { /* Caller already did skb_cow() with len as headroom, @@ -3328,7 +3336,7 @@ static int bpf_skb_proto_4_to_6(struct sk_buff *skb) } } - skb->protocol = htons(ETH_P_IPV6); + bpf_skb_change_protocol(skb, ETH_P_IPV6); skb_clear_hash(skb); return 0; @@ -3358,7 +3366,7 @@ static int bpf_skb_proto_6_to_4(struct sk_buff *skb) } } - skb->protocol = htons(ETH_P_IP); + bpf_skb_change_protocol(skb, ETH_P_IP); skb_clear_hash(skb); return 0; @@ -3549,10 +3557,10 @@ static int bpf_skb_net_grow(struct sk_buff *skb, u32 off, u32 len_diff, /* Match skb->protocol to new outer l3 protocol */ if (skb->protocol == htons(ETH_P_IP) && flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV6) - skb->protocol = htons(ETH_P_IPV6); + bpf_skb_change_protocol(skb, ETH_P_IPV6); else if (skb->protocol == htons(ETH_P_IPV6) && flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV4) - skb->protocol = htons(ETH_P_IP); + bpf_skb_change_protocol(skb, ETH_P_IP); } if (skb_is_gso(skb)) { @@ -3605,10 +3613,10 @@ static int 
bpf_skb_net_shrink(struct sk_buff *skb, u32 off, u32 len_diff, /* Match skb->protocol to new outer l3 protocol */ if (skb->protocol == htons(ETH_P_IP) && flags & BPF_F_ADJ_ROOM_DECAP_L3_IPV6) - skb->protocol = htons(ETH_P_IPV6); + bpf_skb_change_protocol(skb, ETH_P_IPV6); else if (skb->protocol == htons(ETH_P_IPV6) && flags & BPF_F_ADJ_ROOM_DECAP_L3_IPV4) - skb->protocol = htons(ETH_P_IP); + bpf_skb_change_protocol(skb, ETH_P_IP); if (skb_is_gso(skb)) { struct skb_shared_info *shinfo = skb_shinfo(skb); @@ -8023,10 +8031,6 @@ sock_filter_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) if (func_proto) return func_proto; - func_proto = cgroup_current_func_proto(func_id, prog); - if (func_proto) - return func_proto; - switch (func_id) { case BPF_FUNC_get_socket_cookie: return &bpf_get_socket_cookie_sock_proto; @@ -8052,10 +8056,6 @@ sock_addr_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) if (func_proto) return func_proto; - func_proto = cgroup_current_func_proto(func_id, prog); - if (func_proto) - return func_proto; - switch (func_id) { case BPF_FUNC_bind: switch (prog->expected_attach_type) { @@ -8489,18 +8489,12 @@ sk_msg_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return &bpf_msg_pop_data_proto; case BPF_FUNC_perf_event_output: return &bpf_event_output_data_proto; - case BPF_FUNC_get_current_uid_gid: - return &bpf_get_current_uid_gid_proto; case BPF_FUNC_sk_storage_get: return &bpf_sk_storage_get_proto; case BPF_FUNC_sk_storage_delete: return &bpf_sk_storage_delete_proto; case BPF_FUNC_get_netns_cookie: return &bpf_get_netns_cookie_sk_msg_proto; -#ifdef CONFIG_CGROUP_NET_CLASSID - case BPF_FUNC_get_cgroup_classid: - return &bpf_get_cgroup_classid_curr_proto; -#endif default: return bpf_sk_base_func_proto(func_id, prog); } diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c index 2b821b9a8699..7d426a8e29f3 100644 --- a/net/core/gen_estimator.c +++ b/net/core/gen_estimator.c @@ -75,7 +75,7 @@ static void est_fetch_counters(struct net_rate_estimator *e, static void est_timer(struct timer_list *t) { - struct net_rate_estimator *est = from_timer(est, t, timer); + struct net_rate_estimator *est = timer_container_of(est, t, timer); struct gnet_stats_basic_sync b; u64 b_bytes, b_packets; u64 rate, brate; diff --git a/net/core/neighbour.c b/net/core/neighbour.c index a6e2c91ec3e7..49dce9a82295 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -1031,7 +1031,7 @@ static void neigh_probe(struct neighbour *neigh) static void neigh_timer_handler(struct timer_list *t) { unsigned long now, next; - struct neighbour *neigh = from_timer(neigh, t, timer); + struct neighbour *neigh = timer_container_of(neigh, t, timer); unsigned int state; int notify = 0; @@ -1569,7 +1569,7 @@ static void neigh_managed_work(struct work_struct *work) static void neigh_proxy_process(struct timer_list *t) { - struct neigh_table *tbl = from_timer(tbl, t, proxy_timer); + struct neigh_table *tbl = timer_container_of(tbl, t, proxy_timer); long sched_next = 0; unsigned long now = jiffies; struct sk_buff *skb, *n; diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index 1ace0cd01adc..c9b969386399 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c @@ -641,12 +641,6 @@ static ssize_t phys_port_id_show(struct device *dev, struct netdev_phys_item_id ppid; ssize_t ret; - /* The check is also done in dev_get_phys_port_id; this helps returning - * early without hitting the locking section below. 
- */ - if (!netdev->netdev_ops->ndo_get_phys_port_id) - return -EOPNOTSUPP; - ret = sysfs_rtnl_lock(&dev->kobj, &attr->attr, netdev); if (ret) return ret; @@ -668,13 +662,6 @@ static ssize_t phys_port_name_show(struct device *dev, char name[IFNAMSIZ]; ssize_t ret; - /* The checks are also done in dev_get_phys_port_name; this helps - * returning early without hitting the locking section below. - */ - if (!netdev->netdev_ops->ndo_get_phys_port_name && - !netdev->devlink_port) - return -EOPNOTSUPP; - ret = sysfs_rtnl_lock(&dev->kobj, &attr->attr, netdev); if (ret) return ret; @@ -696,14 +683,6 @@ static ssize_t phys_switch_id_show(struct device *dev, struct netdev_phys_item_id ppid = { }; ssize_t ret; - /* The checks are also done in dev_get_phys_port_name; this helps - * returning early without hitting the locking section below. This works - * because recurse is false when calling dev_get_port_parent_id. - */ - if (!netdev->netdev_ops->ndo_get_port_parent_id && - !netdev->devlink_port) - return -EOPNOTSUPP; - ret = sysfs_rtnl_lock(&dev->kobj, &attr->attr, netdev); if (ret) return ret; @@ -718,6 +697,40 @@ static ssize_t phys_switch_id_show(struct device *dev, } static DEVICE_ATTR_RO(phys_switch_id); +static struct attribute *netdev_phys_attrs[] __ro_after_init = { + &dev_attr_phys_port_id.attr, + &dev_attr_phys_port_name.attr, + &dev_attr_phys_switch_id.attr, + NULL, +}; + +static umode_t netdev_phys_is_visible(struct kobject *kobj, + struct attribute *attr, int index) +{ + struct device *dev = kobj_to_dev(kobj); + struct net_device *netdev = to_net_dev(dev); + + if (attr == &dev_attr_phys_port_id.attr) { + if (!netdev->netdev_ops->ndo_get_phys_port_id) + return 0; + } else if (attr == &dev_attr_phys_port_name.attr) { + if (!netdev->netdev_ops->ndo_get_phys_port_name && + !netdev->devlink_port) + return 0; + } else if (attr == &dev_attr_phys_switch_id.attr) { + if (!netdev->netdev_ops->ndo_get_port_parent_id && + !netdev->devlink_port) + return 0; + } + + return attr->mode; +} + +static const struct attribute_group netdev_phys_group = { + .attrs = netdev_phys_attrs, + .is_visible = netdev_phys_is_visible, +}; + static ssize_t threaded_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -783,9 +796,6 @@ static struct attribute *net_class_attrs[] __ro_after_init = { &dev_attr_tx_queue_len.attr, &dev_attr_gro_flush_timeout.attr, &dev_attr_napi_defer_hard_irqs.attr, - &dev_attr_phys_port_id.attr, - &dev_attr_phys_port_name.attr, - &dev_attr_phys_switch_id.attr, &dev_attr_proto_down.attr, &dev_attr_carrier_up_count.attr, &dev_attr_carrier_down_count.attr, @@ -2328,6 +2338,7 @@ int netdev_register_kobject(struct net_device *ndev) groups++; *groups++ = &netstat_group; + *groups++ = &netdev_phys_group; if (wireless_group_needed(ndev)) *groups++ = &wireless_group; diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c index 42ee7fce3d95..ae54f26709ca 100644 --- a/net/core/net_namespace.c +++ b/net/core/net_namespace.c @@ -654,7 +654,7 @@ static void cleanup_net(struct work_struct *work) struct net *net, *tmp, *last; LIST_HEAD(net_exit_list); - cleanup_net_task = current; + WRITE_ONCE(cleanup_net_task, current); /* Atomically snapshot the list of namespaces to cleanup */ net_kill_list = llist_del_all(&cleanup_list); @@ -704,7 +704,7 @@ static void cleanup_net(struct work_struct *work) put_user_ns(net->user_ns); net_passive_dec(net); } - cleanup_net_task = NULL; + WRITE_ONCE(cleanup_net_task, NULL); } /** diff --git a/net/core/netpoll.c b/net/core/netpoll.c index 
4ddb7490df4b..07c453864a7d 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c @@ -58,13 +58,6 @@ static void zap_completion_queue(void); static unsigned int carrier_timeout = 4; module_param(carrier_timeout, uint, 0644); -#define np_info(np, fmt, ...) \ - pr_info("%s: " fmt, np->name, ##__VA_ARGS__) -#define np_err(np, fmt, ...) \ - pr_err("%s: " fmt, np->name, ##__VA_ARGS__) -#define np_notice(np, fmt, ...) \ - pr_notice("%s: " fmt, np->name, ##__VA_ARGS__) - static netdev_tx_t netpoll_start_xmit(struct sk_buff *skb, struct net_device *dev, struct netdev_queue *txq) @@ -499,43 +492,6 @@ int netpoll_send_udp(struct netpoll *np, const char *msg, int len) } EXPORT_SYMBOL(netpoll_send_udp); -void netpoll_print_options(struct netpoll *np) -{ - np_info(np, "local port %d\n", np->local_port); - if (np->ipv6) - np_info(np, "local IPv6 address %pI6c\n", &np->local_ip.in6); - else - np_info(np, "local IPv4 address %pI4\n", &np->local_ip.ip); - np_info(np, "interface name '%s'\n", np->dev_name); - np_info(np, "local ethernet address '%pM'\n", np->dev_mac); - np_info(np, "remote port %d\n", np->remote_port); - if (np->ipv6) - np_info(np, "remote IPv6 address %pI6c\n", &np->remote_ip.in6); - else - np_info(np, "remote IPv4 address %pI4\n", &np->remote_ip.ip); - np_info(np, "remote ethernet address %pM\n", np->remote_mac); -} -EXPORT_SYMBOL(netpoll_print_options); - -static int netpoll_parse_ip_addr(const char *str, union inet_addr *addr) -{ - const char *end; - - if (!strchr(str, ':') && - in4_pton(str, -1, (void *)addr, -1, &end) > 0) { - if (!*end) - return 0; - } - if (in6_pton(str, -1, addr->in6.s6_addr, -1, &end) > 0) { -#if IS_ENABLED(CONFIG_IPV6) - if (!*end) - return 1; -#else - return -1; -#endif - } - return -1; -} static void skb_pool_flush(struct netpoll *np) { @@ -546,95 +502,6 @@ static void skb_pool_flush(struct netpoll *np) skb_queue_purge_reason(skb_pool, SKB_CONSUMED); } -int netpoll_parse_options(struct netpoll *np, char *opt) -{ - char *cur=opt, *delim; - int ipv6; - bool ipversion_set = false; - - if (*cur != '@') { - if ((delim = strchr(cur, '@')) == NULL) - goto parse_failed; - *delim = 0; - if (kstrtou16(cur, 10, &np->local_port)) - goto parse_failed; - cur = delim; - } - cur++; - - if (*cur != '/') { - ipversion_set = true; - if ((delim = strchr(cur, '/')) == NULL) - goto parse_failed; - *delim = 0; - ipv6 = netpoll_parse_ip_addr(cur, &np->local_ip); - if (ipv6 < 0) - goto parse_failed; - else - np->ipv6 = (bool)ipv6; - cur = delim; - } - cur++; - - if (*cur != ',') { - /* parse out dev_name or dev_mac */ - if ((delim = strchr(cur, ',')) == NULL) - goto parse_failed; - *delim = 0; - - np->dev_name[0] = '\0'; - eth_broadcast_addr(np->dev_mac); - if (!strchr(cur, ':')) - strscpy(np->dev_name, cur, sizeof(np->dev_name)); - else if (!mac_pton(cur, np->dev_mac)) - goto parse_failed; - - cur = delim; - } - cur++; - - if (*cur != '@') { - /* dst port */ - if ((delim = strchr(cur, '@')) == NULL) - goto parse_failed; - *delim = 0; - if (*cur == ' ' || *cur == '\t') - np_info(np, "warning: whitespace is not allowed\n"); - if (kstrtou16(cur, 10, &np->remote_port)) - goto parse_failed; - cur = delim; - } - cur++; - - /* dst ip */ - if ((delim = strchr(cur, '/')) == NULL) - goto parse_failed; - *delim = 0; - ipv6 = netpoll_parse_ip_addr(cur, &np->remote_ip); - if (ipv6 < 0) - goto parse_failed; - else if (ipversion_set && np->ipv6 != (bool)ipv6) - goto parse_failed; - else - np->ipv6 = (bool)ipv6; - cur = delim + 1; - - if (*cur != 0) { - /* MAC address */ - if (!mac_pton(cur, 
np->remote_mac)) - goto parse_failed; - } - - netpoll_print_options(np); - - return 0; - - parse_failed: - np_info(np, "couldn't parse config at '%s'!\n", cur); - return -1; -} -EXPORT_SYMBOL(netpoll_parse_options); - static void refill_skbs_work_handler(struct work_struct *work) { struct netpoll *np = @@ -863,7 +730,7 @@ static void rcu_cleanup_netpoll_info(struct rcu_head *rcu_head) kfree(npinfo); } -void __netpoll_cleanup(struct netpoll *np) +static void __netpoll_cleanup(struct netpoll *np) { struct netpoll_info *npinfo; @@ -885,7 +752,6 @@ void __netpoll_cleanup(struct netpoll *np) skb_pool_flush(np); } -EXPORT_SYMBOL_GPL(__netpoll_cleanup); void __netpoll_free(struct netpoll *np) { diff --git a/net/core/page_pool.c b/net/core/page_pool.c index 4011eb305cee..ba7cf3e3c32f 100644 --- a/net/core/page_pool.c +++ b/net/core/page_pool.c @@ -153,9 +153,9 @@ u64 *page_pool_ethtool_stats_get(u64 *data, const void *stats) EXPORT_SYMBOL(page_pool_ethtool_stats_get); #else -#define alloc_stat_inc(pool, __stat) -#define recycle_stat_inc(pool, __stat) -#define recycle_stat_add(pool, __stat, val) +#define alloc_stat_inc(...) do { } while (0) +#define recycle_stat_inc(...) do { } while (0) +#define recycle_stat_add(...) do { } while (0) #endif static bool page_pool_producer_lock(struct page_pool *pool) @@ -741,19 +741,16 @@ void page_pool_return_page(struct page_pool *pool, netmem_ref netmem) static bool page_pool_recycle_in_ring(struct page_pool *pool, netmem_ref netmem) { - int ret; - /* BH protection not needed if current is softirq */ - if (in_softirq()) - ret = ptr_ring_produce(&pool->ring, (__force void *)netmem); - else - ret = ptr_ring_produce_bh(&pool->ring, (__force void *)netmem); + bool in_softirq, ret; - if (!ret) { + /* BH protection not needed if current is softirq */ + in_softirq = page_pool_producer_lock(pool); + ret = !__ptr_ring_produce(&pool->ring, (__force void *)netmem); + if (ret) recycle_stat_inc(pool, ring); - return true; - } + page_pool_producer_unlock(pool, in_softirq); - return false; + return ret; } /* Only allow direct recycling in special circumstances, into the @@ -1150,10 +1147,14 @@ static void page_pool_scrub(struct page_pool *pool) static int page_pool_release(struct page_pool *pool) { + bool in_softirq; int inflight; page_pool_scrub(pool); inflight = page_pool_inflight(pool, true); + /* Acquire producer lock to make sure producers have exited. 
*/ + in_softirq = page_pool_producer_lock(pool); + page_pool_producer_unlock(pool, in_softirq); if (!inflight) __page_pool_destroy(pool); diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index f9a35bdc58ad..c57692eb8da9 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -3671,7 +3671,7 @@ struct net_device *rtnl_create_link(struct net *net, const char *ifname, if (tb[IFLA_LINKMODE]) dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]); if (tb[IFLA_GROUP]) - dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP])); + netif_set_group(dev, nla_get_u32(tb[IFLA_GROUP])); if (tb[IFLA_GSO_MAX_SIZE]) netif_set_gso_max_size(dev, nla_get_u32(tb[IFLA_GSO_MAX_SIZE])); if (tb[IFLA_GSO_MAX_SEGS]) diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 85fc82f72d26..d6420b74ea9c 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -6261,9 +6261,6 @@ int skb_ensure_writable(struct sk_buff *skb, unsigned int write_len) if (!pskb_may_pull(skb, write_len)) return -ENOMEM; - if (!skb_frags_readable(skb)) - return -EFAULT; - if (!skb_cloned(skb) || skb_clone_writable(skb, write_len)) return 0; diff --git a/net/core/skmsg.c b/net/core/skmsg.c index 0ddc4c718833..34c51eb1a14f 100644 --- a/net/core/skmsg.c +++ b/net/core/skmsg.c @@ -530,16 +530,22 @@ static int sk_psock_skb_ingress_enqueue(struct sk_buff *skb, u32 off, u32 len, struct sk_psock *psock, struct sock *sk, - struct sk_msg *msg) + struct sk_msg *msg, + bool take_ref) { int num_sge, copied; + /* skb_to_sgvec will fail when the total number of fragments in + * frag_list and frags exceeds MAX_MSG_FRAGS. For example, the + * caller may aggregate multiple skbs. + */ num_sge = skb_to_sgvec(skb, msg->sg.data, off, len); if (num_sge < 0) { /* skb linearize may fail with ENOMEM, but lets simply try again * later if this happens. Under memory pressure we don't want to * drop the skb. We need to linearize the skb so that the mapping * in skb_to_sgvec can not error. + * Note that skb_linearize requires the skb not to be shared. */ if (skb_linearize(skb)) return -EAGAIN; @@ -556,7 +562,7 @@ static int sk_psock_skb_ingress_enqueue(struct sk_buff *skb, msg->sg.start = 0; msg->sg.size = copied; msg->sg.end = num_sge; - msg->skb = skb; + msg->skb = take_ref ? skb_get(skb) : skb; sk_psock_queue_msg(psock, msg); sk_psock_data_ready(sk, psock); @@ -564,7 +570,7 @@ static int sk_psock_skb_ingress_enqueue(struct sk_buff *skb, } static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb, - u32 off, u32 len); + u32 off, u32 len, bool take_ref); static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb, u32 off, u32 len) @@ -578,7 +584,7 @@ static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb, * correctly. */ if (unlikely(skb->sk == sk)) - return sk_psock_skb_ingress_self(psock, skb, off, len); + return sk_psock_skb_ingress_self(psock, skb, off, len, true); msg = sk_psock_create_ingress_msg(sk, skb); if (!msg) return -EAGAIN; @@ -590,7 +596,7 @@ static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb, * into user buffers. */ skb_set_owner_r(skb, sk); - err = sk_psock_skb_ingress_enqueue(skb, off, len, psock, sk, msg); + err = sk_psock_skb_ingress_enqueue(skb, off, len, psock, sk, msg, true); if (err < 0) kfree(msg); return err; @@ -601,7 +607,7 @@ static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb, * because the skb is already accounted for here. 
*/ static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb, - u32 off, u32 len) + u32 off, u32 len, bool take_ref) { struct sk_msg *msg = alloc_sk_msg(GFP_ATOMIC); struct sock *sk = psock->sk; @@ -610,7 +616,7 @@ static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb if (unlikely(!msg)) return -EAGAIN; skb_set_owner_r(skb, sk); - err = sk_psock_skb_ingress_enqueue(skb, off, len, psock, sk, msg); + err = sk_psock_skb_ingress_enqueue(skb, off, len, psock, sk, msg, take_ref); if (err < 0) kfree(msg); return err; @@ -619,18 +625,13 @@ static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb, u32 off, u32 len, bool ingress) { - int err = 0; - if (!ingress) { if (!sock_writeable(psock->sk)) return -EAGAIN; return skb_send_sock(psock->sk, skb, off, len); } - skb_get(skb); - err = sk_psock_skb_ingress(psock, skb, off, len); - if (err < 0) - kfree_skb(skb); - return err; + + return sk_psock_skb_ingress(psock, skb, off, len); } static void sk_psock_skb_state(struct sk_psock *psock, @@ -655,12 +656,14 @@ static void sk_psock_backlog(struct work_struct *work) bool ingress; int ret; + /* Increment the psock refcnt to synchronize with close(fd) path in + * sock_map_close(), ensuring we wait for backlog thread completion + * before sk_socket freed. If refcnt increment fails, it indicates + * sock_map_close() completed with sk_socket potentially already freed. + */ + if (!sk_psock_get(psock->sk)) + return; mutex_lock(&psock->work_mutex); - if (unlikely(state->len)) { - len = state->len; - off = state->off; - } - while ((skb = skb_peek(&psock->ingress_skb))) { len = skb->len; off = 0; @@ -670,6 +673,13 @@ static void sk_psock_backlog(struct work_struct *work) off = stm->offset; len = stm->full_len; } + + /* Resume processing from previous partial state */ + if (unlikely(state->len)) { + len = state->len; + off = state->off; + } + ingress = skb_bpf_ingress(skb); skb_bpf_redirect_clear(skb); do { @@ -680,7 +690,8 @@ static void sk_psock_backlog(struct work_struct *work) if (ret <= 0) { if (ret == -EAGAIN) { sk_psock_skb_state(psock, state, len, off); - + /* Restore redir info we cleared before */ + skb_bpf_set_redir(skb, psock->sk, ingress); /* Delay slightly to prioritize any * other work that might be here. 
*/ @@ -697,11 +708,14 @@ static void sk_psock_backlog(struct work_struct *work) len -= ret; } while (len); + /* The entire skb sent, clear state */ + sk_psock_skb_state(psock, state, 0, 0); skb = skb_dequeue(&psock->ingress_skb); kfree_skb(skb); } end: mutex_unlock(&psock->work_mutex); + sk_psock_put(psock->sk, psock); } struct sk_psock *sk_psock_init(struct sock *sk, int node) @@ -1014,7 +1028,7 @@ static int sk_psock_verdict_apply(struct sk_psock *psock, struct sk_buff *skb, off = stm->offset; len = stm->full_len; } - err = sk_psock_skb_ingress_self(psock, skb, off, len); + err = sk_psock_skb_ingress_self(psock, skb, off, len, false); } if (err < 0) { spin_lock_bh(&psock->ingress_lock); diff --git a/net/core/sock.c b/net/core/sock.c index 341979874459..502042a0d3b5 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -837,14 +837,6 @@ static void __sock_set_timestamps(struct sock *sk, bool val, bool new, bool ns) } } -void sock_enable_timestamps(struct sock *sk) -{ - lock_sock(sk); - __sock_set_timestamps(sk, true, false, true); - release_sock(sk); -} -EXPORT_SYMBOL(sock_enable_timestamps); - void sock_set_timestamp(struct sock *sk, int optname, bool valbool) { switch (optname) { @@ -3284,16 +3276,16 @@ int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind) { struct mem_cgroup *memcg = mem_cgroup_sockets_enabled ? sk->sk_memcg : NULL; struct proto *prot = sk->sk_prot; - bool charged = false; + bool charged = true; long allocated; sk_memory_allocated_add(sk, amt); allocated = sk_memory_allocated(sk); if (memcg) { - if (!mem_cgroup_charge_skmem(memcg, amt, gfp_memcg_charge())) + charged = mem_cgroup_charge_skmem(memcg, amt, gfp_memcg_charge()); + if (!charged) goto suppress_allocation; - charged = true; } /* Under limit. */ @@ -3378,7 +3370,7 @@ suppress_allocation: sk_memory_allocated_sub(sk, amt); - if (charged) + if (memcg && charged) mem_cgroup_uncharge_skmem(memcg, amt); return 0; diff --git a/net/core/utils.c b/net/core/utils.c index e47feeaa5a49..5e63b0ea21f3 100644 --- a/net/core/utils.c +++ b/net/core/utils.c @@ -473,11 +473,11 @@ void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb, EXPORT_SYMBOL(inet_proto_csum_replace16); void inet_proto_csum_replace_by_diff(__sum16 *sum, struct sk_buff *skb, - __wsum diff, bool pseudohdr) + __wsum diff, bool pseudohdr, bool ipv6) { if (skb->ip_summed != CHECKSUM_PARTIAL) { csum_replace_by_diff(sum, diff); - if (skb->ip_summed == CHECKSUM_COMPLETE && pseudohdr) + if (skb->ip_summed == CHECKSUM_COMPLETE && pseudohdr && !ipv6) skb->csum = ~csum_sub(diff, skb->csum); } else if (pseudohdr) { *sum = ~csum_fold(csum_add(diff, csum_unfold(*sum))); diff --git a/net/devlink/param.c b/net/devlink/param.c index b29abf8d3ed4..396b8a7f6013 100644 --- a/net/devlink/param.c +++ b/net/devlink/param.c @@ -92,6 +92,11 @@ static const struct devlink_param devlink_param_generic[] = { .name = DEVLINK_PARAM_GENERIC_EVENT_EQ_SIZE_NAME, .type = DEVLINK_PARAM_GENERIC_EVENT_EQ_SIZE_TYPE, }, + { + .id = DEVLINK_PARAM_GENERIC_ID_ENABLE_PHC, + .name = DEVLINK_PARAM_GENERIC_ENABLE_PHC_NAME, + .type = DEVLINK_PARAM_GENERIC_ENABLE_PHC_TYPE, + }, }; static int devlink_param_generic_verify(const struct devlink_param *param) diff --git a/net/dsa/Kconfig b/net/dsa/Kconfig index 2dfe9063613f..869cbe57162f 100644 --- a/net/dsa/Kconfig +++ b/net/dsa/Kconfig @@ -42,12 +42,24 @@ config NET_DSA_TAG_BRCM Broadcom switches which place the tag after the MAC source address. 
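The Kconfig split that follows separates the BCM63xx tagger (which works without the original FCS and length ahead of the tag) from the new BCM53xx variant, whose transmit path further down recomputes the frame's FCS and encodes the untagged frame length into the 6-byte legacy tag. A minimal standalone sketch of that length/FCS math, reusing only the BRCM_LEG_* values visible in the hunk; the tag type bytes are not defined in this hunk and are left as zero placeholders, and the port number is made up for illustration:

/* Sketch only: mirrors the tag/FCS math of brcm_leg_fcs_tag_xmit() in userspace. */
#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

#define BRCM_LEG_TAG_LEN   6
#define BRCM_LEG_EGRESS    (2 << 5)
#define BRCM_LEG_PORT_ID   0xf
#define BRCM_LEG_LEN_HI(x) (((x) >> 8) & 0x7)
#define BRCM_LEG_LEN_LO(x) ((x) & 0xff)

/* Reflected CRC-32 (poly 0xEDB88320); same result as crc32_le(~0, p, len) ^ ~0 */
static uint32_t eth_fcs(const uint8_t *p, size_t len)
{
	uint32_t crc = ~0u;

	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ (0xEDB88320u & -(crc & 1));
	}
	return ~crc;
}

int main(void)
{
	uint8_t frame[66] = { 0 };	/* dummy frame, already padded as __skb_put_padto() guarantees */
	size_t fcs_len = sizeof(frame);	/* frame length before the tag is pushed */
	uint32_t fcs = eth_fcs(frame, fcs_len);
	uint8_t tag[BRCM_LEG_TAG_LEN] = { 0, 0 };	/* bytes 0/1: tag type, placeholders here */
	int port = 3;			/* hypothetical egress port index */

	tag[2] = BRCM_LEG_EGRESS | BRCM_LEG_LEN_HI(fcs_len);
	tag[3] = BRCM_LEG_LEN_LO(fcs_len);
	tag[4] = 0;
	tag[5] = port & BRCM_LEG_PORT_ID;

	printf("len=%zu tag[2..5]=%02x %02x %02x %02x fcs(le)=%02x %02x %02x %02x\n",
	       fcs_len, tag[2], tag[3], tag[4], tag[5],
	       fcs & 0xff, (fcs >> 8) & 0xff, (fcs >> 16) & 0xff, (fcs >> 24) & 0xff);
	return 0;
}
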
config NET_DSA_TAG_BRCM_LEGACY - tristate "Tag driver for Broadcom legacy switches using in-frame headers" + tristate "Tag driver for BCM63xx legacy switches using in-frame headers" select NET_DSA_TAG_BRCM_COMMON help Say Y if you want to enable support for tagging frames for the - Broadcom legacy switches which place the tag after the MAC source + BCM63xx legacy switches which place the tag after the MAC source address. + This tag is used in BCM63xx legacy switches which work without the + original FCS and length before the tag insertion. + +config NET_DSA_TAG_BRCM_LEGACY_FCS + tristate "Tag driver for BCM53xx legacy switches using in-frame headers" + select NET_DSA_TAG_BRCM_COMMON + help + Say Y if you want to enable support for tagging frames for the + BCM53xx legacy switches which place the tag after the MAC source + address. + This tag is used in BCM53xx legacy switches which expect original + FCS and length before the tag insertion to be present. config NET_DSA_TAG_BRCM_PREPEND tristate "Tag driver for Broadcom switches using prepended headers" diff --git a/net/dsa/tag_brcm.c b/net/dsa/tag_brcm.c index 8c3c068728e5..26bb657ceac3 100644 --- a/net/dsa/tag_brcm.c +++ b/net/dsa/tag_brcm.c @@ -15,6 +15,7 @@ #define BRCM_NAME "brcm" #define BRCM_LEGACY_NAME "brcm-legacy" +#define BRCM_LEGACY_FCS_NAME "brcm-legacy-fcs" #define BRCM_PREPEND_NAME "brcm-prepend" /* Legacy Broadcom tag (6 bytes) */ @@ -32,6 +33,10 @@ #define BRCM_LEG_MULTICAST (1 << 5) #define BRCM_LEG_EGRESS (2 << 5) #define BRCM_LEG_INGRESS (3 << 5) +#define BRCM_LEG_LEN_HI(x) (((x) >> 8) & 0x7) + +/* 4th byte in the tag */ +#define BRCM_LEG_LEN_LO(x) ((x) & 0xff) /* 6th byte in the tag */ #define BRCM_LEG_PORT_ID (0xf) @@ -212,6 +217,41 @@ DSA_TAG_DRIVER(brcm_netdev_ops); MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_BRCM, BRCM_NAME); #endif +#if IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM_LEGACY) || \ + IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM_LEGACY_FCS) +static struct sk_buff *brcm_leg_tag_rcv(struct sk_buff *skb, + struct net_device *dev) +{ + int len = BRCM_LEG_TAG_LEN; + int source_port; + u8 *brcm_tag; + + if (unlikely(!pskb_may_pull(skb, BRCM_LEG_TAG_LEN + VLAN_HLEN))) + return NULL; + + brcm_tag = dsa_etype_header_pos_rx(skb); + + source_port = brcm_tag[5] & BRCM_LEG_PORT_ID; + + skb->dev = dsa_conduit_find_user(dev, 0, source_port); + if (!skb->dev) + return NULL; + + /* VLAN tag is added by BCM63xx internal switch */ + if (netdev_uses_dsa(skb->dev)) + len += VLAN_HLEN; + + /* Remove Broadcom tag and update checksum */ + skb_pull_rcsum(skb, len); + + dsa_default_offload_fwd_mark(skb); + + dsa_strip_etype_header(skb, len); + + return skb; +} +#endif /* CONFIG_NET_DSA_TAG_BRCM_LEGACY || CONFIG_NET_DSA_TAG_BRCM_LEGACY_FCS */ + #if IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM_LEGACY) static struct sk_buff *brcm_leg_tag_xmit(struct sk_buff *skb, struct net_device *dev) @@ -250,49 +290,77 @@ static struct sk_buff *brcm_leg_tag_xmit(struct sk_buff *skb, return skb; } -static struct sk_buff *brcm_leg_tag_rcv(struct sk_buff *skb, - struct net_device *dev) +static const struct dsa_device_ops brcm_legacy_netdev_ops = { + .name = BRCM_LEGACY_NAME, + .proto = DSA_TAG_PROTO_BRCM_LEGACY, + .xmit = brcm_leg_tag_xmit, + .rcv = brcm_leg_tag_rcv, + .needed_headroom = BRCM_LEG_TAG_LEN, +}; + +DSA_TAG_DRIVER(brcm_legacy_netdev_ops); +MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_BRCM_LEGACY, BRCM_LEGACY_NAME); +#endif /* CONFIG_NET_DSA_TAG_BRCM_LEGACY */ + +#if IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM_LEGACY_FCS) +static struct sk_buff *brcm_leg_fcs_tag_xmit(struct sk_buff 
*skb, + struct net_device *dev) { - int len = BRCM_LEG_TAG_LEN; - int source_port; + struct dsa_port *dp = dsa_user_to_port(dev); + unsigned int fcs_len; + __le32 fcs_val; u8 *brcm_tag; - if (unlikely(!pskb_may_pull(skb, BRCM_LEG_PORT_ID))) + /* The Ethernet switch we are interfaced with needs packets to be at + * least 64 bytes (including FCS) otherwise they will be discarded when + * they enter the switch port logic. When Broadcom tags are enabled, we + * need to make sure that packets are at least 70 bytes (including FCS + * and tag) because the length verification is done after the Broadcom + * tag is stripped off the ingress packet. + * + * Let dsa_user_xmit() free the SKB. + */ + if (__skb_put_padto(skb, ETH_ZLEN + BRCM_LEG_TAG_LEN, false)) return NULL; - brcm_tag = dsa_etype_header_pos_rx(skb); + fcs_len = skb->len; + fcs_val = cpu_to_le32(crc32_le(~0, skb->data, fcs_len) ^ ~0); - source_port = brcm_tag[5] & BRCM_LEG_PORT_ID; + skb_push(skb, BRCM_LEG_TAG_LEN); - skb->dev = dsa_conduit_find_user(dev, 0, source_port); - if (!skb->dev) - return NULL; + dsa_alloc_etype_header(skb, BRCM_LEG_TAG_LEN); - /* VLAN tag is added by BCM63xx internal switch */ - if (netdev_uses_dsa(skb->dev)) - len += VLAN_HLEN; + brcm_tag = skb->data + 2 * ETH_ALEN; - /* Remove Broadcom tag and update checksum */ - skb_pull_rcsum(skb, len); + /* Broadcom tag type */ + brcm_tag[0] = BRCM_LEG_TYPE_HI; + brcm_tag[1] = BRCM_LEG_TYPE_LO; - dsa_default_offload_fwd_mark(skb); + /* Broadcom tag value */ + brcm_tag[2] = BRCM_LEG_EGRESS | BRCM_LEG_LEN_HI(fcs_len); + brcm_tag[3] = BRCM_LEG_LEN_LO(fcs_len); + brcm_tag[4] = 0; + brcm_tag[5] = dp->index & BRCM_LEG_PORT_ID; - dsa_strip_etype_header(skb, len); + /* Original FCS value */ + if (__skb_pad(skb, ETH_FCS_LEN, false)) + return NULL; + skb_put_data(skb, &fcs_val, ETH_FCS_LEN); return skb; } -static const struct dsa_device_ops brcm_legacy_netdev_ops = { - .name = BRCM_LEGACY_NAME, - .proto = DSA_TAG_PROTO_BRCM_LEGACY, - .xmit = brcm_leg_tag_xmit, +static const struct dsa_device_ops brcm_legacy_fcs_netdev_ops = { + .name = BRCM_LEGACY_FCS_NAME, + .proto = DSA_TAG_PROTO_BRCM_LEGACY_FCS, + .xmit = brcm_leg_fcs_tag_xmit, .rcv = brcm_leg_tag_rcv, .needed_headroom = BRCM_LEG_TAG_LEN, }; -DSA_TAG_DRIVER(brcm_legacy_netdev_ops); -MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_BRCM_LEGACY, BRCM_LEGACY_NAME); -#endif /* CONFIG_NET_DSA_TAG_BRCM_LEGACY */ +DSA_TAG_DRIVER(brcm_legacy_fcs_netdev_ops); +MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_BRCM_LEGACY_FCS, BRCM_LEGACY_FCS_NAME); +#endif /* CONFIG_NET_DSA_TAG_BRCM_LEGACY_FCS */ #if IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM_PREPEND) static struct sk_buff *brcm_tag_xmit_prepend(struct sk_buff *skb, @@ -328,6 +396,9 @@ static struct dsa_tag_driver *dsa_tag_driver_array[] = { #if IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM_LEGACY) &DSA_TAG_DRIVER_NAME(brcm_legacy_netdev_ops), #endif +#if IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM_LEGACY_FCS) + &DSA_TAG_DRIVER_NAME(brcm_legacy_fcs_netdev_ops), +#endif #if IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM_PREPEND) &DSA_TAG_DRIVER_NAME(brcm_prepend_netdev_ops), #endif diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c index 39ec920f5de7..a14cf901c32d 100644 --- a/net/ethtool/ioctl.c +++ b/net/ethtool/ioctl.c @@ -1048,9 +1048,20 @@ static int ethtool_check_flow_types(struct net_device *dev, u32 input_xfrm) continue; info.flow_type = i; - err = ops->get_rxnfc(dev, &info, NULL); - if (err) - continue; + + if (ops->get_rxfh_fields) { + struct ethtool_rxfh_fields fields = { + .flow_type = info.flow_type, + }; + + if 
(ops->get_rxfh_fields(dev, &fields)) + continue; + + info.data = fields.data; + } else { + if (ops->get_rxnfc(dev, &info, NULL)) + continue; + } err = ethtool_check_xfrm_rxfh(input_xfrm, info.data); if (err) @@ -1060,6 +1071,90 @@ static int ethtool_check_flow_types(struct net_device *dev, u32 input_xfrm) return 0; } +static noinline_for_stack int +ethtool_set_rxfh_fields(struct net_device *dev, u32 cmd, void __user *useraddr) +{ + const struct ethtool_ops *ops = dev->ethtool_ops; + struct ethtool_rxfh_fields fields = {}; + struct ethtool_rxnfc info; + size_t info_size = sizeof(info); + int rc; + + if (!ops->set_rxnfc && !ops->set_rxfh_fields) + return -EOPNOTSUPP; + + rc = ethtool_rxnfc_copy_struct(cmd, &info, &info_size, useraddr); + if (rc) + return rc; + + if (info.flow_type & FLOW_RSS && info.rss_context && + !ops->rxfh_per_ctx_fields) + return -EINVAL; + + if (ops->get_rxfh) { + struct ethtool_rxfh_param rxfh = {}; + + rc = ops->get_rxfh(dev, &rxfh); + if (rc) + return rc; + + rc = ethtool_check_xfrm_rxfh(rxfh.input_xfrm, info.data); + if (rc) + return rc; + } + + if (!ops->set_rxfh_fields) + return ops->set_rxnfc(dev, &info); + + fields.data = info.data; + fields.flow_type = info.flow_type & ~FLOW_RSS; + if (info.flow_type & FLOW_RSS) + fields.rss_context = info.rss_context; + + return ops->set_rxfh_fields(dev, &fields, NULL); +} + +static noinline_for_stack int +ethtool_get_rxfh_fields(struct net_device *dev, u32 cmd, void __user *useraddr) +{ + struct ethtool_rxnfc info; + size_t info_size = sizeof(info); + const struct ethtool_ops *ops = dev->ethtool_ops; + int ret; + + if (!ops->get_rxnfc && !ops->get_rxfh_fields) + return -EOPNOTSUPP; + + ret = ethtool_rxnfc_copy_struct(cmd, &info, &info_size, useraddr); + if (ret) + return ret; + + if (info.flow_type & FLOW_RSS && info.rss_context && + !ops->rxfh_per_ctx_fields) + return -EINVAL; + + if (ops->get_rxfh_fields) { + struct ethtool_rxfh_fields fields = { + .flow_type = info.flow_type & ~FLOW_RSS, + }; + + if (info.flow_type & FLOW_RSS) + fields.rss_context = info.rss_context; + + ret = ops->get_rxfh_fields(dev, &fields); + if (ret < 0) + return ret; + + info.data = fields.data; + } else { + ret = ops->get_rxnfc(dev, &info, NULL); + if (ret < 0) + return ret; + } + + return ethtool_rxnfc_copy_to_user(useraddr, &info, info_size, NULL); +} + static noinline_for_stack int ethtool_set_rxnfc(struct net_device *dev, u32 cmd, void __user *useraddr) { @@ -1083,22 +1178,11 @@ static noinline_for_stack int ethtool_set_rxnfc(struct net_device *dev, ethtool_get_flow_spec_ring(info.fs.ring_cookie)) return -EINVAL; - if (!xa_load(&dev->ethtool->rss_ctx, info.rss_context)) + if (info.rss_context && + !xa_load(&dev->ethtool->rss_ctx, info.rss_context)) return -EINVAL; } - if (cmd == ETHTOOL_SRXFH && ops->get_rxfh) { - struct ethtool_rxfh_param rxfh = {}; - - rc = ops->get_rxfh(dev, &rxfh); - if (rc) - return rc; - - rc = ethtool_check_xfrm_rxfh(rxfh.input_xfrm, info.data); - if (rc) - return rc; - } - rc = ops->set_rxnfc(dev, &info); if (rc) return rc; @@ -1444,7 +1528,7 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev, u8 *rss_config; int ret; - if (!ops->get_rxnfc || !ops->set_rxfh) + if ((!ops->get_rxnfc && !ops->get_rxfh_fields) || !ops->set_rxfh) return -EOPNOTSUPP; if (ops->get_rxfh_indir_size) @@ -3338,13 +3422,17 @@ __dev_ethtool(struct net *net, struct ifreq *ifr, void __user *useraddr, dev->ethtool_ops->set_priv_flags); break; case ETHTOOL_GRXFH: + rc = ethtool_get_rxfh_fields(dev, ethcmd, useraddr); + break; + 
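Both the ETHTOOL_GRXFH case above and the ETHTOOL_SRXFH case that follows keep the existing ioctl ABI: userspace still passes a struct ethtool_rxnfc, and the kernel translates it into the new get_rxfh_fields/set_rxfh_fields driver ops when a driver provides them, falling back to get_rxnfc/set_rxnfc otherwise. A minimal userspace sketch of the query side, assuming an interface name given on the command line (defaulting to a hypothetical eth0); it uses only UAPI definitions from linux/ethtool.h and linux/sockios.h:

/* Sketch only: query the RSS hash fields for TCP/IPv4 via ETHTOOL_GRXFH. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(int argc, char **argv)
{
	const char *ifname = argc > 1 ? argv[1] : "eth0";	/* hypothetical interface */
	struct ethtool_rxnfc nfc = {
		.cmd = ETHTOOL_GRXFH,		/* serviced by ethtool_get_rxfh_fields() above */
		.flow_type = TCP_V4_FLOW,	/* flow type whose hash fields we query */
	};
	struct ifreq ifr;
	int fd;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&nfc;

	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		perror("SIOCETHTOOL(ETHTOOL_GRXFH)");
	else
		printf("%s: TCP4 hash fields: %s%s%s%s\n", ifname,
		       nfc.data & RXH_IP_SRC ? "sip " : "",
		       nfc.data & RXH_IP_DST ? "dip " : "",
		       nfc.data & RXH_L4_B_0_1 ? "sport " : "",
		       nfc.data & RXH_L4_B_2_3 ? "dport" : "");

	close(fd);
	return 0;
}
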
case ETHTOOL_SRXFH: + rc = ethtool_set_rxfh_fields(dev, ethcmd, useraddr); + break; case ETHTOOL_GRXRINGS: case ETHTOOL_GRXCLSRLCNT: case ETHTOOL_GRXCLSRULE: case ETHTOOL_GRXCLSRLALL: rc = ethtool_get_rxnfc(dev, ethcmd, useraddr); break; - case ETHTOOL_SRXFH: case ETHTOOL_SRXCLSRLDEL: case ETHTOOL_SRXCLSRLINS: rc = ethtool_set_rxnfc(dev, ethcmd, useraddr); diff --git a/net/ethtool/mm.c b/net/ethtool/mm.c index ad9b40034003..29bbbc149375 100644 --- a/net/ethtool/mm.c +++ b/net/ethtool/mm.c @@ -315,7 +315,7 @@ static void ethtool_mmsv_send_mpacket(struct ethtool_mmsv *mmsv, */ static void ethtool_mmsv_verify_timer(struct timer_list *t) { - struct ethtool_mmsv *mmsv = from_timer(mmsv, t, verify_timer); + struct ethtool_mmsv *mmsv = timer_container_of(mmsv, t, verify_timer); unsigned long flags; bool rearm = false; diff --git a/net/ethtool/pse-pd.c b/net/ethtool/pse-pd.c index 4f6b99eab2a6..6c536dfe52da 100644 --- a/net/ethtool/pse-pd.c +++ b/net/ethtool/pse-pd.c @@ -83,6 +83,8 @@ static int pse_reply_size(const struct ethnl_req_info *req_base, const struct ethtool_pse_control_status *st = &data->status; int len = 0; + if (st->pw_d_id) + len += nla_total_size(sizeof(u32)); /* _PSE_PW_D_ID */ if (st->podl_admin_state > 0) len += nla_total_size(sizeof(u32)); /* _PODL_PSE_ADMIN_STATE */ if (st->podl_pw_status > 0) @@ -109,6 +111,9 @@ static int pse_reply_size(const struct ethnl_req_info *req_base, len += st->c33_pw_limit_nb_ranges * (nla_total_size(0) + nla_total_size(sizeof(u32)) * 2); + if (st->prio_max) + /* _PSE_PRIO_MAX + _PSE_PRIO */ + len += nla_total_size(sizeof(u32)) * 2; return len; } @@ -148,6 +153,11 @@ static int pse_fill_reply(struct sk_buff *skb, const struct pse_reply_data *data = PSE_REPDATA(reply_base); const struct ethtool_pse_control_status *st = &data->status; + if (st->pw_d_id && + nla_put_u32(skb, ETHTOOL_A_PSE_PW_D_ID, + st->pw_d_id)) + return -EMSGSIZE; + if (st->podl_admin_state > 0 && nla_put_u32(skb, ETHTOOL_A_PODL_PSE_ADMIN_STATE, st->podl_admin_state)) @@ -198,6 +208,11 @@ static int pse_fill_reply(struct sk_buff *skb, pse_put_pw_limit_ranges(skb, st)) return -EMSGSIZE; + if (st->prio_max && + (nla_put_u32(skb, ETHTOOL_A_PSE_PRIO_MAX, st->prio_max) || + nla_put_u32(skb, ETHTOOL_A_PSE_PRIO, st->prio))) + return -EMSGSIZE; + return 0; } @@ -219,6 +234,7 @@ const struct nla_policy ethnl_pse_set_policy[ETHTOOL_A_PSE_MAX + 1] = { NLA_POLICY_RANGE(NLA_U32, ETHTOOL_C33_PSE_ADMIN_STATE_DISABLED, ETHTOOL_C33_PSE_ADMIN_STATE_ENABLED), [ETHTOOL_A_C33_PSE_AVAIL_PW_LIMIT] = { .type = NLA_U32 }, + [ETHTOOL_A_PSE_PRIO] = { .type = NLA_U32 }, }; static int @@ -267,6 +283,15 @@ ethnl_set_pse(struct ethnl_req_info *req_info, struct genl_info *info) if (ret) return ret; + if (tb[ETHTOOL_A_PSE_PRIO]) { + unsigned int prio; + + prio = nla_get_u32(tb[ETHTOOL_A_PSE_PRIO]); + ret = pse_ethtool_set_prio(phydev->psec, info->extack, prio); + if (ret) + return ret; + } + if (tb[ETHTOOL_A_C33_PSE_AVAIL_PW_LIMIT]) { unsigned int pw_limit; @@ -315,3 +340,42 @@ const struct ethnl_request_ops ethnl_pse_request_ops = { .set = ethnl_set_pse, /* PSE has no notification */ }; + +void ethnl_pse_send_ntf(struct net_device *netdev, unsigned long notifs) +{ + void *reply_payload; + struct sk_buff *skb; + int reply_len; + int ret; + + ASSERT_RTNL(); + + if (!netdev || !notifs) + return; + + reply_len = ethnl_reply_header_size() + + nla_total_size(sizeof(u32)); /* _PSE_NTF_EVENTS */ + + skb = genlmsg_new(reply_len, GFP_KERNEL); + if (!skb) + return; + + reply_payload = ethnl_bcastmsg_put(skb, 
ETHTOOL_MSG_PSE_NTF); + if (!reply_payload) + goto err_skb; + + ret = ethnl_fill_reply_header(skb, netdev, ETHTOOL_A_PSE_NTF_HEADER); + if (ret < 0) + goto err_skb; + + if (nla_put_uint(skb, ETHTOOL_A_PSE_NTF_EVENTS, notifs)) + goto err_skb; + + genlmsg_end(skb, reply_payload); + ethnl_multicast(skb, netdev); + return; + +err_skb: + nlmsg_free(skb); +} +EXPORT_SYMBOL_GPL(ethnl_pse_send_ntf); diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c index 0d1e56965af0..88657255fec1 100644 --- a/net/hsr/hsr_device.c +++ b/net/hsr/hsr_device.c @@ -408,7 +408,7 @@ static void hsr_announce(struct timer_list *t) struct hsr_port *master; unsigned long interval; - hsr = from_timer(hsr, t, announce_timer); + hsr = timer_container_of(hsr, t, announce_timer); rcu_read_lock(); master = hsr_port_get_hsr(hsr, HSR_PT_MASTER); @@ -424,7 +424,8 @@ static void hsr_announce(struct timer_list *t) */ static void hsr_proxy_announce(struct timer_list *t) { - struct hsr_priv *hsr = from_timer(hsr, t, announce_proxy_timer); + struct hsr_priv *hsr = timer_container_of(hsr, t, + announce_proxy_timer); struct hsr_port *interlink; unsigned long interval = 0; struct hsr_node *node; diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c index 4ce471a2f387..3a2a2fa7a0a3 100644 --- a/net/hsr/hsr_framereg.c +++ b/net/hsr/hsr_framereg.c @@ -617,7 +617,7 @@ static struct hsr_port *get_late_port(struct hsr_priv *hsr, */ void hsr_prune_nodes(struct timer_list *t) { - struct hsr_priv *hsr = from_timer(hsr, t, prune_timer); + struct hsr_priv *hsr = timer_container_of(hsr, t, prune_timer); struct hsr_node *node; struct hsr_node *tmp; struct hsr_port *port; @@ -685,7 +685,7 @@ void hsr_prune_nodes(struct timer_list *t) void hsr_prune_proxy_nodes(struct timer_list *t) { - struct hsr_priv *hsr = from_timer(hsr, t, prune_proxy_timer); + struct hsr_priv *hsr = timer_container_of(hsr, t, prune_proxy_timer); unsigned long timestamp; struct hsr_node *node; struct hsr_node *tmp; diff --git a/net/ieee802154/6lowpan/reassembly.c b/net/ieee802154/6lowpan/reassembly.c index d4b983d17038..ddb6a5817d09 100644 --- a/net/ieee802154/6lowpan/reassembly.c +++ b/net/ieee802154/6lowpan/reassembly.c @@ -44,7 +44,7 @@ static void lowpan_frag_init(struct inet_frag_queue *q, const void *a) static void lowpan_frag_expire(struct timer_list *t) { - struct inet_frag_queue *frag = from_timer(frag, t, timer); + struct inet_frag_queue *frag = timer_container_of(frag, t, timer); struct frag_queue *fq; int refs = 1; diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c index a648fff71ea7..c0440d61cf2f 100644 --- a/net/ipv4/arp.c +++ b/net/ipv4/arp.c @@ -966,6 +966,7 @@ static int arp_is_multicast(const void *pkey) static int arp_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) { + enum skb_drop_reason drop_reason; const struct arphdr *arp; /* do not tweak dropwatch on an ARP we will ignore */ @@ -979,12 +980,15 @@ static int arp_rcv(struct sk_buff *skb, struct net_device *dev, goto out_of_mem; /* ARP header, plus 2 device addresses, plus 2 IP addresses. 
*/ - if (!pskb_may_pull(skb, arp_hdr_len(dev))) + drop_reason = pskb_may_pull_reason(skb, arp_hdr_len(dev)); + if (drop_reason != SKB_NOT_DROPPED_YET) goto freeskb; arp = arp_hdr(skb); - if (arp->ar_hln != dev->addr_len || arp->ar_pln != 4) + if (arp->ar_hln != dev->addr_len || arp->ar_pln != 4) { + drop_reason = SKB_DROP_REASON_NOT_SPECIFIED; goto freeskb; + } memset(NEIGH_CB(skb), 0, sizeof(struct neighbour_cb)); @@ -996,7 +1000,7 @@ consumeskb: consume_skb(skb); return NET_RX_SUCCESS; freeskb: - kfree_skb(skb); + kfree_skb_reason(skb, drop_reason); out_of_mem: return NET_RX_DROP; } diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index ca7d539b3846..d1769034b643 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -801,7 +801,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc, static void igmp_gq_timer_expire(struct timer_list *t) { - struct in_device *in_dev = from_timer(in_dev, t, mr_gq_timer); + struct in_device *in_dev = timer_container_of(in_dev, t, mr_gq_timer); in_dev->mr_gq_running = 0; igmpv3_send_report(in_dev, NULL); @@ -810,7 +810,7 @@ static void igmp_gq_timer_expire(struct timer_list *t) static void igmp_ifc_timer_expire(struct timer_list *t) { - struct in_device *in_dev = from_timer(in_dev, t, mr_ifc_timer); + struct in_device *in_dev = timer_container_of(in_dev, t, mr_ifc_timer); u32 mr_ifc_count; igmpv3_send_cr(in_dev); @@ -840,7 +840,7 @@ static void igmp_ifc_event(struct in_device *in_dev) static void igmp_timer_expire(struct timer_list *t) { - struct ip_mc_list *im = from_timer(im, t, timer); + struct ip_mc_list *im = timer_container_of(im, t, timer); struct in_device *in_dev = im->interface; spin_lock(&im->lock); diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index 20915895bdaa..6906bedad19a 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -1065,7 +1065,7 @@ EXPORT_IPV6_MOD(inet_csk_reqsk_queue_drop_and_put); static void reqsk_timer_handler(struct timer_list *t) { - struct request_sock *req = from_timer(req, t, rsk_timer); + struct request_sock *req = timer_container_of(req, t, rsk_timer); struct request_sock *nreq = NULL, *oreq = req; struct sock *sk_listener = req->rsk_listener; struct inet_connection_sock *icsk; diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c index 67efe9501581..875ff923a8ed 100644 --- a/net/ipv4/inet_timewait_sock.c +++ b/net/ipv4/inet_timewait_sock.c @@ -169,7 +169,7 @@ void inet_twsk_hashdance_schedule(struct inet_timewait_sock *tw, static void tw_timer_handler(struct timer_list *t) { - struct inet_timewait_sock *tw = from_timer(tw, t, tw_timer); + struct inet_timewait_sock *tw = timer_container_of(tw, t, tw_timer); inet_twsk_kill(tw); } diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index 77f395b28ec7..64b3fb3208af 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c @@ -123,7 +123,7 @@ static bool frag_expire_skip_icmp(u32 user) static void ip_expire(struct timer_list *t) { enum skb_drop_reason reason = SKB_DROP_REASON_FRAG_REASM_TIMEOUT; - struct inet_frag_queue *frag = from_timer(frag, t, timer); + struct inet_frag_queue *frag = timer_container_of(frag, t, timer); const struct iphdr *iph; struct sk_buff *head = NULL; struct net *net; diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index 678b8f96e3e9..aaeb5d16f0c9 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c @@ -668,7 +668,7 @@ void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, 
ip_tunnel_adj_headroom(dev, headroom); iptunnel_xmit(NULL, rt, skb, fl4.saddr, fl4.daddr, proto, tos, ttl, - df, !net_eq(tunnel->net, dev_net(dev))); + df, !net_eq(tunnel->net, dev_net(dev)), 0); return; tx_error: DEV_STATS_INC(dev, tx_errors); @@ -857,7 +857,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, ip_tunnel_adj_headroom(dev, max_headroom); iptunnel_xmit(NULL, rt, skb, fl4.saddr, fl4.daddr, protocol, tos, ttl, - df, !net_eq(tunnel->net, dev_net(dev))); + df, !net_eq(tunnel->net, dev_net(dev)), 0); return; #if IS_ENABLED(CONFIG_IPV6) diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c index f65d2f727381..cc9915543637 100644 --- a/net/ipv4/ip_tunnel_core.c +++ b/net/ipv4/ip_tunnel_core.c @@ -49,7 +49,8 @@ EXPORT_SYMBOL(ip6tun_encaps); void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb, __be32 src, __be32 dst, __u8 proto, - __u8 tos, __u8 ttl, __be16 df, bool xnet) + __u8 tos, __u8 ttl, __be16 df, bool xnet, + u16 ipcb_flags) { int pkt_len = skb->len - skb_inner_network_offset(skb); struct net *net = dev_net(rt->dst.dev); @@ -62,6 +63,7 @@ void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb, skb_clear_hash_if_not_l4(skb); skb_dst_set(skb, &rt->dst); memset(IPCB(skb), 0, sizeof(*IPCB(skb))); + IPCB(skb)->flags = ipcb_flags; /* Push down and install the IP header. */ skb_push(skb, sizeof(struct iphdr)); diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 2ff2f79c7351..f78c4e53dc8c 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -765,7 +765,7 @@ static void ipmr_destroy_unres(struct mr_table *mrt, struct mfc_cache *c) /* Timer process for the unresolved queue. */ static void ipmr_expire_process(struct timer_list *t) { - struct mr_table *mrt = from_timer(mrt, t, ipmr_expire_timer); + struct mr_table *mrt = timer_container_of(mrt, t, ipmr_expire_timer); struct mr_mfc *c, *next; unsigned long expires; unsigned long now; @@ -1853,20 +1853,19 @@ static bool ipmr_forward_offloaded(struct sk_buff *skb, struct mr_table *mrt, /* Processing handlers for ipmr_forward, under rcu_read_lock() */ -static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt, - int in_vifi, struct sk_buff *skb, int vifi) +static int ipmr_prepare_xmit(struct net *net, struct mr_table *mrt, + struct sk_buff *skb, int vifi) { const struct iphdr *iph = ip_hdr(skb); struct vif_device *vif = &mrt->vif_table[vifi]; struct net_device *vif_dev; - struct net_device *dev; struct rtable *rt; struct flowi4 fl4; int encap = 0; vif_dev = vif_dev_read(vif); if (!vif_dev) - goto out_free; + return -1; if (vif->flags & VIFF_REGISTER) { WRITE_ONCE(vif->pkt_out, vif->pkt_out + 1); @@ -1874,12 +1873,9 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt, DEV_STATS_ADD(vif_dev, tx_bytes, skb->len); DEV_STATS_INC(vif_dev, tx_packets); ipmr_cache_report(mrt, skb, vifi, IGMPMSG_WHOLEPKT); - goto out_free; + return -1; } - if (ipmr_forward_offloaded(skb, mrt, in_vifi, vifi)) - goto out_free; - if (vif->flags & VIFF_TUNNEL) { rt = ip_route_output_ports(net, &fl4, NULL, vif->remote, vif->local, @@ -1887,7 +1883,7 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt, IPPROTO_IPIP, iph->tos & INET_DSCP_MASK, vif->link); if (IS_ERR(rt)) - goto out_free; + return -1; encap = sizeof(struct iphdr); } else { rt = ip_route_output_ports(net, &fl4, NULL, iph->daddr, 0, @@ -1895,11 +1891,9 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt, IPPROTO_IPIP, iph->tos & INET_DSCP_MASK, vif->link); if (IS_ERR(rt)) - goto 
out_free; + return -1; } - dev = rt->dst.dev; - if (skb->len+encap > dst_mtu(&rt->dst) && (ntohs(iph->frag_off) & IP_DF)) { /* Do not fragment multicasts. Alas, IPv4 does not * allow to send ICMP, so that packets will disappear @@ -1907,14 +1901,14 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt, */ IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS); ip_rt_put(rt); - goto out_free; + return -1; } - encap += LL_RESERVED_SPACE(dev) + rt->dst.header_len; + encap += LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len; if (skb_cow(skb, encap)) { ip_rt_put(rt); - goto out_free; + return -1; } WRITE_ONCE(vif->pkt_out, vif->pkt_out + 1); @@ -1934,6 +1928,22 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt, DEV_STATS_ADD(vif_dev, tx_bytes, skb->len); } + return 0; +} + +static void ipmr_queue_fwd_xmit(struct net *net, struct mr_table *mrt, + int in_vifi, struct sk_buff *skb, int vifi) +{ + struct rtable *rt; + + if (ipmr_forward_offloaded(skb, mrt, in_vifi, vifi)) + goto out_free; + + if (ipmr_prepare_xmit(net, mrt, skb, vifi)) + goto out_free; + + rt = skb_rtable(skb); + IPCB(skb)->flags |= IPSKB_FORWARDED; /* RFC1584 teaches, that DVMRP/PIM router must deliver packets locally @@ -1947,7 +1957,7 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt, * result in receiving multiple packets. */ NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD, - net, NULL, skb, skb->dev, dev, + net, NULL, skb, skb->dev, rt->dst.dev, ipmr_forward_finish); return; @@ -1955,6 +1965,19 @@ out_free: kfree_skb(skb); } +static void ipmr_queue_output_xmit(struct net *net, struct mr_table *mrt, + struct sk_buff *skb, int vifi) +{ + if (ipmr_prepare_xmit(net, mrt, skb, vifi)) + goto out_free; + + ip_mc_output(net, NULL, skb); + return; + +out_free: + kfree_skb(skb); +} + /* Called with mrt_lock or rcu_read_lock() */ static int ipmr_find_vif(const struct mr_table *mrt, struct net_device *dev) { @@ -2065,8 +2088,8 @@ forward: struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); if (skb2) - ipmr_queue_xmit(net, mrt, true_vifi, - skb2, psend); + ipmr_queue_fwd_xmit(net, mrt, true_vifi, + skb2, psend); } psend = ct; } @@ -2077,10 +2100,10 @@ last_forward: struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); if (skb2) - ipmr_queue_xmit(net, mrt, true_vifi, skb2, - psend); + ipmr_queue_fwd_xmit(net, mrt, true_vifi, skb2, + psend); } else { - ipmr_queue_xmit(net, mrt, true_vifi, skb, psend); + ipmr_queue_fwd_xmit(net, mrt, true_vifi, skb, psend); return; } } @@ -2214,6 +2237,110 @@ dont_forward: return 0; } +static void ip_mr_output_finish(struct net *net, struct mr_table *mrt, + struct net_device *dev, struct sk_buff *skb, + struct mfc_cache *c) +{ + int psend = -1; + int ct; + + atomic_long_inc(&c->_c.mfc_un.res.pkt); + atomic_long_add(skb->len, &c->_c.mfc_un.res.bytes); + WRITE_ONCE(c->_c.mfc_un.res.lastuse, jiffies); + + /* Forward the frame */ + if (c->mfc_origin == htonl(INADDR_ANY) && + c->mfc_mcastgrp == htonl(INADDR_ANY)) { + if (ip_hdr(skb)->ttl > + c->_c.mfc_un.res.ttls[c->_c.mfc_parent]) { + /* It's an (*,*) entry and the packet is not coming from + * the upstream: forward the packet to the upstream + * only. 
+ */ + psend = c->_c.mfc_parent; + goto last_xmit; + } + goto dont_xmit; + } + + for (ct = c->_c.mfc_un.res.maxvif - 1; + ct >= c->_c.mfc_un.res.minvif; ct--) { + if (ip_hdr(skb)->ttl > c->_c.mfc_un.res.ttls[ct]) { + if (psend != -1) { + struct sk_buff *skb2; + + skb2 = skb_clone(skb, GFP_ATOMIC); + if (skb2) + ipmr_queue_output_xmit(net, mrt, + skb2, psend); + } + psend = ct; + } + } + +last_xmit: + if (psend != -1) { + ipmr_queue_output_xmit(net, mrt, skb, psend); + return; + } + +dont_xmit: + kfree_skb(skb); +} + +/* Multicast packets for forwarding arrive here + * Called with rcu_read_lock(); + */ +int ip_mr_output(struct net *net, struct sock *sk, struct sk_buff *skb) +{ + struct rtable *rt = skb_rtable(skb); + struct mfc_cache *cache; + struct net_device *dev; + struct mr_table *mrt; + int vif; + + WARN_ON_ONCE(!rcu_read_lock_held()); + dev = rt->dst.dev; + + if (IPCB(skb)->flags & IPSKB_FORWARDED) + goto mc_output; + if (!(IPCB(skb)->flags & IPSKB_MCROUTE)) + goto mc_output; + + skb->dev = dev; + + mrt = ipmr_rt_fib_lookup(net, skb); + if (IS_ERR(mrt)) + goto mc_output; + + /* already under rcu_read_lock() */ + cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr); + if (!cache) { + vif = ipmr_find_vif(mrt, dev); + if (vif >= 0) + cache = ipmr_cache_find_any(mrt, ip_hdr(skb)->daddr, + vif); + } + + /* No usable cache entry */ + if (!cache) { + vif = ipmr_find_vif(mrt, dev); + if (vif >= 0) + return ipmr_cache_unresolved(mrt, vif, skb, dev); + goto mc_output; + } + + vif = cache->_c.mfc_parent; + if (rcu_access_pointer(mrt->vif_table[vif].dev) != dev) + goto mc_output; + + ip_mr_output_finish(net, mrt, dev, skb, cache); + return 0; + +mc_output: + return ip_mc_output(net, sk, skb); +} + #ifdef CONFIG_IP_PIMSM_V1 /* Handle IGMP messages of PIMv1 */ int pim_rcv_v1(struct sk_buff *skb) diff --git a/net/ipv4/route.c b/net/ipv4/route.c index fccb05fb3a79..3ddf6bf40357 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -2660,7 +2660,7 @@ add: if (IN_DEV_MFORWARD(in_dev) && !ipv4_is_local_multicast(fl4->daddr)) { rth->dst.input = ip_mr_input; - rth->dst.output = ip_mc_output; + rth->dst.output = ip_mr_output; } } #endif diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index f64f8276a73c..8a3c99246d2e 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -5053,9 +5053,8 @@ static void __init tcp_struct_check(void) CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_tx, reordering); CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_tx, notsent_lowat); CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_tx, gso_segs); - CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_tx, lost_skb_hint); CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_tx, retransmit_skb_hint); - CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_read_tx, 40); + CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_read_tx, 32); /* TXRX read-mostly hotpath cache lines */ CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_txrx, tsoffset); @@ -5243,6 +5242,6 @@ void __init tcp_init(void) tcp_v4_init(); tcp_metrics_init(); BUG_ON(tcp_register_congestion_control(&tcp_reno) != 0); - tcp_tasklet_init(); + tcp_tsq_work_init(); mptcp_init(); } diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c index 9b83d639b5ac..5107121c5e37 100644 --- a/net/ipv4/tcp_fastopen.c +++ b/net/ipv4/tcp_fastopen.c @@ -3,6 +3,7 @@ #include <linux/tcp.h> #include <linux/rcupdate.h> #include <net/tcp.h> +#include <net/busy_poll.h> void tcp_fastopen_init_key_once(struct 
net *net) { @@ -279,6 +280,8 @@ static struct sock *tcp_fastopen_create_child(struct sock *sk, refcount_set(&req->rsk_refcnt, 2); + sk_mark_napi_id_set(child, skb); + /* Now finish processing the fastopen child socket. */ tcp_init_transfer(child, BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB, skb); diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 8ec92dec321a..3e70b31076b2 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -1451,11 +1451,6 @@ static u8 tcp_sacktag_one(struct sock *sk, tp->sacked_out += pcount; /* Out-of-order packets delivered */ state->sack_delivered += pcount; - - /* Lost marker hint past SACKed? Tweak RFC3517 cnt */ - if (tp->lost_skb_hint && - before(start_seq, TCP_SKB_CB(tp->lost_skb_hint)->seq)) - tp->lost_cnt_hint += pcount; } /* D-SACK. We can detect redundant retransmission in S|R and plain R @@ -1496,9 +1491,6 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *prev, tcp_skb_timestamp_us(skb)); tcp_rate_skb_delivered(sk, skb, state->rate); - if (skb == tp->lost_skb_hint) - tp->lost_cnt_hint += pcount; - TCP_SKB_CB(prev)->end_seq += shifted; TCP_SKB_CB(skb)->seq += shifted; @@ -1531,10 +1523,6 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *prev, if (skb == tp->retransmit_skb_hint) tp->retransmit_skb_hint = prev; - if (skb == tp->lost_skb_hint) { - tp->lost_skb_hint = prev; - tp->lost_cnt_hint -= tcp_skb_pcount(prev); - } TCP_SKB_CB(prev)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags; TCP_SKB_CB(prev)->eor = TCP_SKB_CB(skb)->eor; @@ -2151,12 +2139,6 @@ static inline void tcp_init_undo(struct tcp_sock *tp) tp->undo_retrans = -1; } -static bool tcp_is_rack(const struct sock *sk) -{ - return READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_recovery) & - TCP_RACK_LOSS_DETECTION; -} - /* If we detect SACK reneging, forget all SACK information * and reset tags completely, otherwise preserve SACKs. If receiver * dropped its ofo queue, we will know this due to reneging detection. @@ -2182,8 +2164,7 @@ static void tcp_timeout_mark_lost(struct sock *sk) skb_rbtree_walk_from(skb) { if (is_reneg) TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_ACKED; - else if (tcp_is_rack(sk) && skb != head && - tcp_rack_skb_timeout(tp, skb, 0) > 0) + else if (skb != head && tcp_rack_skb_timeout(tp, skb, 0) > 0) continue; /* Don't mark recently sent ones lost yet */ tcp_mark_skb_lost(sk, skb); } @@ -2264,22 +2245,6 @@ static bool tcp_check_sack_reneging(struct sock *sk, int *ack_flag) return false; } -/* Heurestics to calculate number of duplicate ACKs. There's no dupACKs - * counter when SACK is enabled (without SACK, sacked_out is used for - * that purpose). - * - * With reordering, holes may still be in flight, so RFC3517 recovery - * uses pure sacked_out (total number of SACKed segments) even though - * it violates the RFC that uses duplicate ACKs, often these are equal - * but when e.g. out-of-window ACKs or packet duplication occurs, - * they differ. Since neither occurs due to loss, TCP should really - * ignore them. - */ -static inline int tcp_dupack_heuristics(const struct tcp_sock *tp) -{ - return tp->sacked_out + 1; -} - /* Linux NewReno/SACK/ECN state machine. * -------------------------------------- * @@ -2332,13 +2297,7 @@ static inline int tcp_dupack_heuristics(const struct tcp_sock *tp) * * If the receiver supports SACK: * - * RFC6675/3517: It is the conventional algorithm. A packet is - * considered lost if the number of higher sequence packets - * SACKed is greater than or equal the DUPACK thoreshold - * (reordering). 
This is implemented in tcp_mark_head_lost and - * tcp_update_scoreboard. - * - * RACK (draft-ietf-tcpm-rack-01): it is a newer algorithm + * RACK (RFC8985): RACK is a newer loss detection algorithm * (2017-) that checks timing instead of counting DUPACKs. * Essentially a packet is considered lost if it's not S/ACKed * after RTT + reordering_window, where both metrics are @@ -2353,8 +2312,8 @@ static inline int tcp_dupack_heuristics(const struct tcp_sock *tp) * is lost (NewReno). This heuristics are the same in NewReno * and SACK. * - * Really tricky (and requiring careful tuning) part of algorithm - * is hidden in functions tcp_time_to_recover() and tcp_xmit_retransmit_queue(). + * The really tricky (and requiring careful tuning) part of the algorithm + * is hidden in the RACK code in tcp_recovery.c and tcp_xmit_retransmit_queue(). * The first determines the moment _when_ we should reduce CWND and, * hence, slow down forward transmission. In fact, it determines the moment * when we decide that hole is caused by loss, rather than by a reorder. @@ -2381,79 +2340,8 @@ static bool tcp_time_to_recover(struct sock *sk, int flag) { struct tcp_sock *tp = tcp_sk(sk); - /* Trick#1: The loss is proven. */ - if (tp->lost_out) - return true; - - /* Not-A-Trick#2 : Classic rule... */ - if (!tcp_is_rack(sk) && tcp_dupack_heuristics(tp) > tp->reordering) - return true; - - return false; -} - -/* Detect loss in event "A" above by marking head of queue up as lost. - * For RFC3517 SACK, a segment is considered lost if it - * has at least tp->reordering SACKed seqments above it; "packets" refers to - * the maximum SACKed segments to pass before reaching this limit. - */ -static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head) -{ - struct tcp_sock *tp = tcp_sk(sk); - struct sk_buff *skb; - int cnt; - /* Use SACK to deduce losses of new sequences sent during recovery */ - const u32 loss_high = tp->snd_nxt; - - WARN_ON(packets > tp->packets_out); - skb = tp->lost_skb_hint; - if (skb) { - /* Head already handled? */ - if (mark_head && after(TCP_SKB_CB(skb)->seq, tp->snd_una)) - return; - cnt = tp->lost_cnt_hint; - } else { - skb = tcp_rtx_queue_head(sk); - cnt = 0; - } - - skb_rbtree_walk_from(skb) { - /* TODO: do this better */ - /* this is not the most efficient way to do this... */ - tp->lost_skb_hint = skb; - tp->lost_cnt_hint = cnt; - - if (after(TCP_SKB_CB(skb)->end_seq, loss_high)) - break; - - if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) - cnt += tcp_skb_pcount(skb); - - if (cnt > packets) - break; - - if (!(TCP_SKB_CB(skb)->sacked & TCPCB_LOST)) - tcp_mark_skb_lost(sk, skb); - - if (mark_head) - break; - } - tcp_verify_left_out(tp); -} - -/* Account newly detected lost packet(s) */ - -static void tcp_update_scoreboard(struct sock *sk, int fast_rexmit) -{ - struct tcp_sock *tp = tcp_sk(sk); - - if (tcp_is_sack(tp)) { - int sacked_upto = tp->sacked_out - tp->reordering; - if (sacked_upto >= 0) - tcp_mark_head_lost(sk, sacked_upto, 0); - else if (fast_rexmit) - tcp_mark_head_lost(sk, 1, 1); - } + /* Has loss detection marked at least one packet lost? 
*/ + return tp->lost_out != 0; } static bool tcp_tsopt_ecr_before(const struct tcp_sock *tp, u32 when) @@ -2479,20 +2367,33 @@ static inline bool tcp_packet_delayed(const struct tcp_sock *tp) { const struct sock *sk = (const struct sock *)tp; - if (tp->retrans_stamp && - tcp_tsopt_ecr_before(tp, tp->retrans_stamp)) - return true; /* got echoed TS before first retransmission */ + /* Received an echoed timestamp before the first retransmission? */ + if (tp->retrans_stamp) + return tcp_tsopt_ecr_before(tp, tp->retrans_stamp); + + /* We set tp->retrans_stamp upon the first retransmission of a loss + * recovery episode, so normally if tp->retrans_stamp is 0 then no + * retransmission has happened yet (likely due to TSQ, which can cause + * fast retransmits to be delayed). So if snd_una advanced while + * (tp->retrans_stamp is 0 then apparently a packet was merely delayed, + * not lost. But there are exceptions where we retransmit but then + * clear tp->retrans_stamp, so we check for those exceptions. + */ - /* Check if nothing was retransmitted (retrans_stamp==0), which may - * happen in fast recovery due to TSQ. But we ignore zero retrans_stamp - * in TCP_SYN_SENT, since when we set FLAG_SYN_ACKED we also clear - * retrans_stamp even if we had retransmitted the SYN. + /* (1) For non-SACK connections, tcp_is_non_sack_preventing_reopen() + * clears tp->retrans_stamp when snd_una == high_seq. */ - if (!tp->retrans_stamp && /* no record of a retransmit/SYN? */ - sk->sk_state != TCP_SYN_SENT) /* not the FLAG_SYN_ACKED case? */ - return true; /* nothing was retransmitted */ + if (!tcp_is_sack(tp) && !before(tp->snd_una, tp->high_seq)) + return false; - return false; + /* (2) In TCP_SYN_SENT tcp_clean_rtx_queue() clears tp->retrans_stamp + * when setting FLAG_SYN_ACKED is set, even if the SYN was + * retransmitted. + */ + if (sk->sk_state == TCP_SYN_SENT) + return false; + + return true; /* tp->retrans_stamp is zero; no retransmit yet */ } /* Undo procedures. */ @@ -2881,8 +2782,6 @@ void tcp_simple_retransmit(struct sock *sk) tcp_mark_skb_lost(sk, skb); } - tcp_clear_retrans_hints_partial(tp); - if (!tp->lost_out) return; @@ -2990,17 +2889,8 @@ static void tcp_process_loss(struct sock *sk, int flag, int num_dupack, *rexmit = REXMIT_LOST; } -static bool tcp_force_fast_retransmit(struct sock *sk) -{ - struct tcp_sock *tp = tcp_sk(sk); - - return after(tcp_highest_sack_seq(tp), - tp->snd_una + tp->reordering * tp->mss_cache); -} - /* Undo during fast recovery after partial ACK. */ -static bool tcp_try_undo_partial(struct sock *sk, u32 prior_snd_una, - bool *do_lost) +static bool tcp_try_undo_partial(struct sock *sk, u32 prior_snd_una) { struct tcp_sock *tp = tcp_sk(sk); @@ -3025,9 +2915,6 @@ static bool tcp_try_undo_partial(struct sock *sk, u32 prior_snd_una, tcp_undo_cwnd_reduction(sk, true); NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPPARTIALUNDO); tcp_try_keep_open(sk); - } else { - /* Partial ACK arrived. Force fast retransmit. 
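The rewritten tcp_packet_delayed() above replaces the old two-condition return with an explicit decision tree: trust the echoed-timestamp comparison when a retransmit has been stamped, otherwise treat the ACK as proof of mere delay unless one of the two retrans_stamp-clearing exceptions applies. A compact standalone restatement of that logic, using plain booleans in place of the real tcp_sock state (purely illustrative, not kernel code), is:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for the socket state consulted by
 * tcp_packet_delayed(); field names here are placeholders, not kernel ABI.
 */
struct demo_state {
	bool retrans_stamp_set;   /* tp->retrans_stamp != 0              */
	bool ecr_before_retrans;  /* echoed TS older than retrans_stamp  */
	bool is_sack;             /* SACK negotiated                     */
	bool snd_una_at_high_seq; /* !before(tp->snd_una, tp->high_seq)  */
	bool syn_sent;            /* sk->sk_state == TCP_SYN_SENT        */
};

/* Mirrors the shape of the new tcp_packet_delayed():
 * 1) with a retransmit timestamp recorded, the echoed timestamp decides;
 * 2) otherwise assume "merely delayed" unless one of the two exceptions
 *    that clear retrans_stamp after a retransmit applies.
 */
static bool packet_delayed(const struct demo_state *s)
{
	if (s->retrans_stamp_set)
		return s->ecr_before_retrans;

	if (!s->is_sack && s->snd_una_at_high_seq)
		return false;	/* exception (1): non-SACK reopen path */

	if (s->syn_sent)
		return false;	/* exception (2): SYN_SENT clears the stamp */

	return true;		/* no retransmit recorded yet */
}

int main(void)
{
	struct demo_state s = { .retrans_stamp_set = false, .is_sack = true };

	printf("delayed: %d\n", packet_delayed(&s));	/* prints 1 */
	return 0;
}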
*/ - *do_lost = tcp_force_fast_retransmit(sk); } return false; } @@ -3041,7 +2928,7 @@ static void tcp_identify_packet_loss(struct sock *sk, int *ack_flag) if (unlikely(tcp_is_reno(tp))) { tcp_newreno_mark_lost(sk, *ack_flag & FLAG_SND_UNA_ADVANCED); - } else if (tcp_is_rack(sk)) { + } else { u32 prior_retrans = tp->retrans_out; if (tcp_rack_mark_lost(sk)) @@ -3068,10 +2955,8 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una, { struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); - int fast_rexmit = 0, flag = *ack_flag; + int flag = *ack_flag; bool ece_ack = flag & FLAG_ECE; - bool do_lost = num_dupack || ((flag & FLAG_DATA_SACKED) && - tcp_force_fast_retransmit(sk)); if (!tp->packets_out && tp->sacked_out) tp->sacked_out = 0; @@ -3120,7 +3005,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una, if (!(flag & FLAG_SND_UNA_ADVANCED)) { if (tcp_is_reno(tp)) tcp_add_reno_sack(sk, num_dupack, ece_ack); - } else if (tcp_try_undo_partial(sk, prior_snd_una, &do_lost)) + } else if (tcp_try_undo_partial(sk, prior_snd_una)) return; if (tcp_try_undo_dsack(sk)) @@ -3175,11 +3060,8 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una, /* Otherwise enter Recovery state */ tcp_enter_recovery(sk, ece_ack); - fast_rexmit = 1; } - if (!tcp_is_rack(sk) && do_lost) - tcp_update_scoreboard(sk, fast_rexmit); *rexmit = REXMIT_LOST; } @@ -3435,8 +3317,6 @@ static int tcp_clean_rtx_queue(struct sock *sk, const struct sk_buff *ack_skb, next = skb_rb_next(skb); if (unlikely(skb == tp->retransmit_skb_hint)) tp->retransmit_skb_hint = NULL; - if (unlikely(skb == tp->lost_skb_hint)) - tp->lost_skb_hint = NULL; tcp_highest_sack_replace(sk, skb, next); tcp_rtx_queue_unlink_and_free(skb, sk); } @@ -3494,14 +3374,9 @@ static int tcp_clean_rtx_queue(struct sock *sk, const struct sk_buff *ack_skb, if (flag & FLAG_RETRANS_DATA_ACKED) flag &= ~FLAG_ORIG_SACK_ACKED; } else { - int delta; - /* Non-retransmitted hole got filled? That's reordering */ if (before(reord, prior_fack)) tcp_check_sack_reordering(sk, reord, 0); - - delta = prior_sacked - tp->sacked_out; - tp->lost_cnt_hint -= min(tp->lost_cnt_hint, delta); } } else if (skb && rtt_update && sack_rtt_us >= 0 && sack_rtt_us > tcp_stamp_us_delta(tp->tcp_mstamp, diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 3ac8d2d17e1f..28f840724fe8 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -1066,15 +1066,15 @@ static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb * needs to be reallocated in a driver. * The invariant being skb->truesize subtracted from sk->sk_wmem_alloc * - * Since transmit from skb destructor is forbidden, we use a tasklet + * Since transmit from skb destructor is forbidden, we use a BH work item * to process all sockets that eventually need to send more skbs. - * We use one tasklet per cpu, with its own queue of sockets. + * We use one work item per cpu, with its own queue of sockets. */ -struct tsq_tasklet { - struct tasklet_struct tasklet; +struct tsq_work { + struct work_struct work; struct list_head head; /* queue of tcp sockets */ }; -static DEFINE_PER_CPU(struct tsq_tasklet, tsq_tasklet); +static DEFINE_PER_CPU(struct tsq_work, tsq_work); static void tcp_tsq_write(struct sock *sk) { @@ -1104,14 +1104,14 @@ static void tcp_tsq_handler(struct sock *sk) bh_unlock_sock(sk); } /* - * One tasklet per cpu tries to send more skbs. 
- * We run in tasklet context but need to disable irqs when + * One work item per cpu tries to send more skbs. + * We run in BH context but need to disable irqs when * transferring tsq->head because tcp_wfree() might * interrupt us (non NAPI drivers) */ -static void tcp_tasklet_func(struct tasklet_struct *t) +static void tcp_tsq_workfn(struct work_struct *work) { - struct tsq_tasklet *tsq = from_tasklet(tsq, t, tasklet); + struct tsq_work *tsq = container_of(work, struct tsq_work, work); LIST_HEAD(list); unsigned long flags; struct list_head *q, *n; @@ -1181,15 +1181,15 @@ void tcp_release_cb(struct sock *sk) } EXPORT_IPV6_MOD(tcp_release_cb); -void __init tcp_tasklet_init(void) +void __init tcp_tsq_work_init(void) { int i; for_each_possible_cpu(i) { - struct tsq_tasklet *tsq = &per_cpu(tsq_tasklet, i); + struct tsq_work *tsq = &per_cpu(tsq_work, i); INIT_LIST_HEAD(&tsq->head); - tasklet_setup(&tsq->tasklet, tcp_tasklet_func); + INIT_WORK(&tsq->work, tcp_tsq_workfn); } } @@ -1203,11 +1203,11 @@ void tcp_wfree(struct sk_buff *skb) struct sock *sk = skb->sk; struct tcp_sock *tp = tcp_sk(sk); unsigned long flags, nval, oval; - struct tsq_tasklet *tsq; + struct tsq_work *tsq; bool empty; /* Keep one reference on sk_wmem_alloc. - * Will be released by sk_free() from here or tcp_tasklet_func() + * Will be released by sk_free() from here or tcp_tsq_workfn() */ WARN_ON(refcount_sub_and_test(skb->truesize - 1, &sk->sk_wmem_alloc)); @@ -1229,13 +1229,13 @@ void tcp_wfree(struct sk_buff *skb) nval = (oval & ~TSQF_THROTTLED) | TSQF_QUEUED; } while (!try_cmpxchg(&sk->sk_tsq_flags, &oval, nval)); - /* queue this socket to tasklet queue */ + /* queue this socket to BH workqueue */ local_irq_save(flags); - tsq = this_cpu_ptr(&tsq_tasklet); + tsq = this_cpu_ptr(&tsq_work); empty = list_empty(&tsq->head); list_add(&tp->tsq_node, &tsq->head); if (empty) - tasklet_schedule(&tsq->tasklet); + queue_work(system_bh_wq, &tsq->work); local_irq_restore(flags); return; out: @@ -1554,11 +1554,6 @@ static void tcp_adjust_pcount(struct sock *sk, const struct sk_buff *skb, int de if (tcp_is_reno(tp) && decr > 0) tp->sacked_out -= min_t(u32, tp->sacked_out, decr); - if (tp->lost_skb_hint && - before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(tp->lost_skb_hint)->seq) && - (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) - tp->lost_cnt_hint -= decr; - tcp_verify_left_out(tp); } @@ -2639,7 +2634,7 @@ static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb, if (refcount_read(&sk->sk_wmem_alloc) > limit) { /* Always send skb if rtx queue is empty or has one skb. * No need to wait for TX completion to call us back, - * after softirq/tasklet schedule. + * after softirq schedule. * This helps when TX completions are delayed too much. 
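The tasklet-to-BH-workqueue conversion above keeps tcp_wfree()'s lock-free flag transition: a socket is queued at most once per drain by flipping TSQF_THROTTLED to TSQF_QUEUED in a cmpxchg loop, and the per-CPU work item is scheduled only when the list was previously empty. A standalone C11 sketch of that at-most-once transition, with demo flag names rather than the kernel's, is:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define DEMO_THROTTLED	(1UL << 0)
#define DEMO_QUEUED	(1UL << 1)

/* Returns true if the caller won the right to enqueue the socket, i.e. it
 * observed THROTTLED set and QUEUED clear and atomically switched the two.
 * Same loop structure as the try_cmpxchg() in tcp_wfree(); names are
 * illustrative only.
 */
static bool demo_mark_queued(_Atomic unsigned long *flags)
{
	unsigned long oval = atomic_load(flags), nval;

	do {
		if (!(oval & DEMO_THROTTLED) || (oval & DEMO_QUEUED))
			return false;	/* nothing to do, or already queued */
		nval = (oval & ~DEMO_THROTTLED) | DEMO_QUEUED;
	} while (!atomic_compare_exchange_weak(flags, &oval, nval));

	return true;
}

int main(void)
{
	_Atomic unsigned long flags = DEMO_THROTTLED;

	printf("first caller queues:  %d\n", demo_mark_queued(&flags)); /* 1 */
	printf("second caller queues: %d\n", demo_mark_queued(&flags)); /* 0 */
	return 0;
}

Compiling with -std=c11 is enough to try it; the second call returns false because QUEUED is already set, which is what keeps a socket from being added to the per-CPU list twice.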
*/ if (tcp_rtx_queue_empty_or_single_skb(sk)) @@ -3252,7 +3247,6 @@ static bool tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb) TCP_SKB_CB(skb)->eor = TCP_SKB_CB(next_skb)->eor; /* changed transmit queue under us so clear hints */ - tcp_clear_retrans_hints_partial(tp); if (next_skb == tp->retransmit_skb_hint) tp->retransmit_skb_hint = skb; diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index e4c616bbd727..bb37e24b97a7 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c @@ -359,7 +359,7 @@ void tcp_delack_timer_handler(struct sock *sk) static void tcp_delack_timer(struct timer_list *t) { struct inet_connection_sock *icsk = - from_timer(icsk, t, icsk_delack_timer); + timer_container_of(icsk, t, icsk_delack_timer); struct sock *sk = &icsk->icsk_inet.sk; /* Avoid taking socket spinlock if there is no ACK to send. @@ -726,7 +726,7 @@ void tcp_write_timer_handler(struct sock *sk) static void tcp_write_timer(struct timer_list *t) { struct inet_connection_sock *icsk = - from_timer(icsk, t, icsk_retransmit_timer); + timer_container_of(icsk, t, icsk_retransmit_timer); struct sock *sk = &icsk->icsk_inet.sk; /* Avoid locking the socket when there is no pending event. */ @@ -778,7 +778,7 @@ EXPORT_IPV6_MOD_GPL(tcp_set_keepalive); static void tcp_keepalive_timer(struct timer_list *t) { - struct sock *sk = from_timer(sk, t, sk_timer); + struct sock *sk = timer_container_of(sk, t, sk_timer); struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); u32 elapsed; diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c index 9c775f8aa438..85b5aa82d7d7 100644 --- a/net/ipv4/udp_offload.c +++ b/net/ipv4/udp_offload.c @@ -495,6 +495,7 @@ struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb, bool copy_dtor; __sum16 check; __be16 newlen; + int ret = 0; mss = skb_shinfo(gso_skb)->gso_size; if (gso_skb->len <= sizeof(*uh) + mss) @@ -523,6 +524,10 @@ struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb, if (skb_pagelen(gso_skb) - sizeof(*uh) == skb_shinfo(gso_skb)->gso_size) return __udp_gso_segment_list(gso_skb, features, is_ipv6); + ret = __skb_linearize(gso_skb); + if (ret) + return ERR_PTR(ret); + /* Setup csum, as fraglist skips this in udp4_gro_receive. 
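The from_timer() to timer_container_of() conversions in tcp_timer.c above (and throughout this series) are mechanical renames; both resolve to the usual container_of() arithmetic that recovers the enclosing object from the address of an embedded member. A self-contained illustration of that pointer arithmetic, using generic C and demo types rather than the kernel macros, is:

#include <stddef.h>
#include <stdio.h>

/* Minimal stand-ins: a 'timer' embedded in a larger object, and a
 * container_of()-style macro recovering the outer object from the
 * embedded member's address.
 */
struct demo_timer { unsigned long expires; };

struct demo_sock {
	int id;
	struct demo_timer sk_timer;	/* embedded member */
};

#define demo_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void demo_callback(struct demo_timer *t)
{
	/* same shape as: sk = timer_container_of(sk, t, sk_timer) */
	struct demo_sock *sk = demo_container_of(t, struct demo_sock, sk_timer);

	printf("timer fired for socket %d\n", sk->id);
}

int main(void)
{
	struct demo_sock sk = { .id = 42 };

	demo_callback(&sk.sk_timer);	/* prints "timer fired for socket 42" */
	return 0;
}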
*/ gso_skb->csum_start = skb_transport_header(gso_skb) - gso_skb->head; gso_skb->csum_offset = offsetof(struct udphdr, check); diff --git a/net/ipv4/udp_tunnel_core.c b/net/ipv4/udp_tunnel_core.c index 2326548997d3..fce945f23069 100644 --- a/net/ipv4/udp_tunnel_core.c +++ b/net/ipv4/udp_tunnel_core.c @@ -134,15 +134,17 @@ void udp_tunnel_notify_add_rx_port(struct socket *sock, unsigned short type) struct udp_tunnel_info ti; struct net_device *dev; + ASSERT_RTNL(); + ti.type = type; ti.sa_family = sk->sk_family; ti.port = inet_sk(sk)->inet_sport; - rcu_read_lock(); - for_each_netdev_rcu(net, dev) { + for_each_netdev(net, dev) { + udp_tunnel_nic_lock(dev); udp_tunnel_nic_add_port(dev, &ti); + udp_tunnel_nic_unlock(dev); } - rcu_read_unlock(); } EXPORT_SYMBOL_GPL(udp_tunnel_notify_add_rx_port); @@ -154,22 +156,24 @@ void udp_tunnel_notify_del_rx_port(struct socket *sock, unsigned short type) struct udp_tunnel_info ti; struct net_device *dev; + ASSERT_RTNL(); + ti.type = type; ti.sa_family = sk->sk_family; ti.port = inet_sk(sk)->inet_sport; - rcu_read_lock(); - for_each_netdev_rcu(net, dev) { + for_each_netdev(net, dev) { + udp_tunnel_nic_lock(dev); udp_tunnel_nic_del_port(dev, &ti); + udp_tunnel_nic_unlock(dev); } - rcu_read_unlock(); } EXPORT_SYMBOL_GPL(udp_tunnel_notify_del_rx_port); void udp_tunnel_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb, __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df, __be16 src_port, __be16 dst_port, - bool xnet, bool nocheck) + bool xnet, bool nocheck, u16 ipcb_flags) { struct udphdr *uh; @@ -185,7 +189,8 @@ void udp_tunnel_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb udp_set_csum(nocheck, skb, src, dst, skb->len); - iptunnel_xmit(sk, rt, skb, src, dst, IPPROTO_UDP, tos, ttl, df, xnet); + iptunnel_xmit(sk, rt, skb, src, dst, IPPROTO_UDP, tos, ttl, df, xnet, + ipcb_flags); } EXPORT_SYMBOL_GPL(udp_tunnel_xmit_skb); diff --git a/net/ipv4/udp_tunnel_nic.c b/net/ipv4/udp_tunnel_nic.c index b6d2d16189c0..ff66db48453c 100644 --- a/net/ipv4/udp_tunnel_nic.c +++ b/net/ipv4/udp_tunnel_nic.c @@ -29,6 +29,7 @@ struct udp_tunnel_nic_table_entry { * struct udp_tunnel_nic - UDP tunnel port offload state * @work: async work for talking to hardware from process context * @dev: netdev pointer + * @lock: protects all fields * @need_sync: at least one port start changed * @need_replay: space was freed, we need a replay of all ports * @work_pending: @work is currently scheduled @@ -41,6 +42,8 @@ struct udp_tunnel_nic { struct net_device *dev; + struct mutex lock; + u8 need_sync:1; u8 need_replay:1; u8 work_pending:1; @@ -298,22 +301,11 @@ __udp_tunnel_nic_device_sync(struct net_device *dev, struct udp_tunnel_nic *utn) static void udp_tunnel_nic_device_sync(struct net_device *dev, struct udp_tunnel_nic *utn) { - const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info; - bool may_sleep; - if (!utn->need_sync) return; - /* Drivers which sleep in the callback need to update from - * the workqueue, if we come from the tunnel driver's notification. 
- */ - may_sleep = info->flags & UDP_TUNNEL_NIC_INFO_MAY_SLEEP; - if (!may_sleep) - __udp_tunnel_nic_device_sync(dev, utn); - if (may_sleep || utn->need_replay) { - queue_work(udp_tunnel_nic_workqueue, &utn->work); - utn->work_pending = 1; - } + queue_work(udp_tunnel_nic_workqueue, &utn->work); + utn->work_pending = 1; } static bool @@ -554,12 +546,12 @@ static void __udp_tunnel_nic_reset_ntf(struct net_device *dev) struct udp_tunnel_nic *utn; unsigned int i, j; - ASSERT_RTNL(); - utn = dev->udp_tunnel_nic; if (!utn) return; + mutex_lock(&utn->lock); + utn->need_sync = false; for (i = 0; i < utn->n_tables; i++) for (j = 0; j < info->tables[i].n_entries; j++) { @@ -569,7 +561,7 @@ static void __udp_tunnel_nic_reset_ntf(struct net_device *dev) entry->flags &= ~(UDP_TUNNEL_NIC_ENTRY_DEL | UDP_TUNNEL_NIC_ENTRY_OP_FAIL); - /* We don't release rtnl across ops */ + /* We don't release utn lock across ops */ WARN_ON(entry->flags & UDP_TUNNEL_NIC_ENTRY_FROZEN); if (!entry->use_cnt) continue; @@ -579,6 +571,8 @@ static void __udp_tunnel_nic_reset_ntf(struct net_device *dev) } __udp_tunnel_nic_device_sync(dev, utn); + + mutex_unlock(&utn->lock); } static size_t @@ -643,6 +637,33 @@ err_cancel: return -EMSGSIZE; } +static void __udp_tunnel_nic_assert_locked(struct net_device *dev) +{ + struct udp_tunnel_nic *utn; + + utn = dev->udp_tunnel_nic; + if (utn) + lockdep_assert_held(&utn->lock); +} + +static void __udp_tunnel_nic_lock(struct net_device *dev) +{ + struct udp_tunnel_nic *utn; + + utn = dev->udp_tunnel_nic; + if (utn) + mutex_lock(&utn->lock); +} + +static void __udp_tunnel_nic_unlock(struct net_device *dev) +{ + struct udp_tunnel_nic *utn; + + utn = dev->udp_tunnel_nic; + if (utn) + mutex_unlock(&utn->lock); +} + static const struct udp_tunnel_nic_ops __udp_tunnel_nic_ops = { .get_port = __udp_tunnel_nic_get_port, .set_port_priv = __udp_tunnel_nic_set_port_priv, @@ -651,6 +672,9 @@ static const struct udp_tunnel_nic_ops __udp_tunnel_nic_ops = { .reset_ntf = __udp_tunnel_nic_reset_ntf, .dump_size = __udp_tunnel_nic_dump_size, .dump_write = __udp_tunnel_nic_dump_write, + .assert_locked = __udp_tunnel_nic_assert_locked, + .lock = __udp_tunnel_nic_lock, + .unlock = __udp_tunnel_nic_unlock, }; static void @@ -710,11 +734,15 @@ static void udp_tunnel_nic_device_sync_work(struct work_struct *work) container_of(work, struct udp_tunnel_nic, work); rtnl_lock(); + mutex_lock(&utn->lock); + utn->work_pending = 0; __udp_tunnel_nic_device_sync(utn->dev, utn); if (utn->need_replay) udp_tunnel_nic_replay(utn->dev, utn); + + mutex_unlock(&utn->lock); rtnl_unlock(); } @@ -730,6 +758,7 @@ udp_tunnel_nic_alloc(const struct udp_tunnel_nic_info *info, return NULL; utn->n_tables = n_tables; INIT_WORK(&utn->work, udp_tunnel_nic_device_sync_work); + mutex_init(&utn->lock); for (i = 0; i < n_tables; i++) { utn->entries[i] = kcalloc(info->tables[i].n_entries, @@ -821,8 +850,11 @@ static int udp_tunnel_nic_register(struct net_device *dev) dev_hold(dev); dev->udp_tunnel_nic = utn; - if (!(info->flags & UDP_TUNNEL_NIC_INFO_OPEN_ONLY)) + if (!(info->flags & UDP_TUNNEL_NIC_INFO_OPEN_ONLY)) { + udp_tunnel_nic_lock(dev); udp_tunnel_get_rx_info(dev); + udp_tunnel_nic_unlock(dev); + } return 0; } @@ -832,6 +864,8 @@ udp_tunnel_nic_unregister(struct net_device *dev, struct udp_tunnel_nic *utn) { const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info; + udp_tunnel_nic_lock(dev); + /* For a shared table remove this dev from the list of sharing devices * and if there are other devices just detach. 
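The udp_tunnel_nic changes above move port add/del/flush under a per-device mutex, with the new __udp_tunnel_nic_lock()/_unlock() helpers deliberately written to no-op when the device carries no offload state. A small standalone sketch of that lock-if-present pattern, using a pthread mutex and demo types rather than the kernel API, is:

#include <pthread.h>
#include <stdio.h>

/* Demo structures: a device that may or may not have offload state attached,
 * mirroring dev->udp_tunnel_nic being NULL for non-offloading netdevs.
 */
struct demo_utn {
	pthread_mutex_t lock;
	int ports;
};

struct demo_dev {
	struct demo_utn *utn;	/* NULL when the device has no offload state */
};

static struct demo_utn offload_state = { .lock = PTHREAD_MUTEX_INITIALIZER };

static void demo_nic_lock(struct demo_dev *dev)
{
	if (dev->utn)
		pthread_mutex_lock(&dev->utn->lock);
}

static void demo_nic_unlock(struct demo_dev *dev)
{
	if (dev->utn)
		pthread_mutex_unlock(&dev->utn->lock);
}

static void demo_add_port(struct demo_dev *dev)
{
	/* same bracketing as udp_tunnel_nic_add_port() under the new lock */
	demo_nic_lock(dev);
	if (dev->utn)
		dev->utn->ports++;
	demo_nic_unlock(dev);
}

int main(void)
{
	struct demo_dev with = { .utn = &offload_state };
	struct demo_dev without = { .utn = NULL };

	demo_add_port(&with);
	demo_add_port(&without);	/* harmless no-op, no lock taken */
	printf("ports on offloading dev: %d\n", offload_state.ports);
	return 0;
}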
*/ @@ -841,8 +875,10 @@ udp_tunnel_nic_unregister(struct net_device *dev, struct udp_tunnel_nic *utn) list_for_each_entry(node, &info->shared->devices, list) if (node->dev == dev) break; - if (list_entry_is_head(node, &info->shared->devices, list)) + if (list_entry_is_head(node, &info->shared->devices, list)) { + udp_tunnel_nic_unlock(dev); return; + } list_del(&node->list); kfree(node); @@ -852,6 +888,7 @@ udp_tunnel_nic_unregister(struct net_device *dev, struct udp_tunnel_nic *utn) if (first) { udp_tunnel_drop_rx_info(dev); utn->dev = first->dev; + udp_tunnel_nic_unlock(dev); goto release_dev; } @@ -862,6 +899,7 @@ udp_tunnel_nic_unregister(struct net_device *dev, struct udp_tunnel_nic *utn) * from the work which we will boot immediately. */ udp_tunnel_nic_flush(dev, utn); + udp_tunnel_nic_unlock(dev); /* Wait for the work to be done using the state, netdev core will * retry unregister until we give up our reference on this device. @@ -910,12 +948,16 @@ udp_tunnel_nic_netdevice_event(struct notifier_block *unused, return NOTIFY_DONE; if (event == NETDEV_UP) { + udp_tunnel_nic_lock(dev); WARN_ON(!udp_tunnel_nic_is_empty(dev, utn)); udp_tunnel_get_rx_info(dev); + udp_tunnel_nic_unlock(dev); return NOTIFY_OK; } if (event == NETDEV_GOING_DOWN) { + udp_tunnel_nic_lock(dev); udp_tunnel_nic_flush(dev, utn); + udp_tunnel_nic_unlock(dev); return NOTIFY_OK; } diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 43b19adfbf88..9c297974d3a6 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -3208,7 +3208,7 @@ static void add_addr(struct inet6_dev *idev, const struct in6_addr *addr, } } -#if IS_ENABLED(CONFIG_IPV6_SIT) || IS_ENABLED(CONFIG_NET_IPGRE) || IS_ENABLED(CONFIG_IPV6_GRE) +#if IS_ENABLED(CONFIG_IPV6_SIT) || IS_ENABLED(CONFIG_NET_IPGRE) static void add_v4_addrs(struct inet6_dev *idev) { struct in6_addr addr; @@ -3463,6 +3463,7 @@ static void addrconf_dev_config(struct net_device *dev) (dev->type != ARPHRD_IEEE1394) && (dev->type != ARPHRD_TUNNEL6) && (dev->type != ARPHRD_6LOWPAN) && + (dev->type != ARPHRD_IP6GRE) && (dev->type != ARPHRD_TUNNEL) && (dev->type != ARPHRD_NONE) && (dev->type != ARPHRD_RAWIP)) { @@ -3518,7 +3519,7 @@ static void addrconf_sit_config(struct net_device *dev) } #endif -#if IS_ENABLED(CONFIG_NET_IPGRE) || IS_ENABLED(CONFIG_IPV6_GRE) +#if IS_ENABLED(CONFIG_NET_IPGRE) static void addrconf_gre_config(struct net_device *dev) { struct inet6_dev *idev; @@ -3536,7 +3537,7 @@ static void addrconf_gre_config(struct net_device *dev) * which is in EUI64 mode (as __ipv6_isatap_ifid() would fail in this * case). Such devices fall back to add_v4_addrs() instead. 
*/ - if (!(dev->type == ARPHRD_IPGRE && *(__be32 *)dev->dev_addr == 0 && + if (!(*(__be32 *)dev->dev_addr == 0 && idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_EUI64)) { addrconf_addr_gen(idev, true); return; @@ -3557,8 +3558,7 @@ static void addrconf_init_auto_addrs(struct net_device *dev) addrconf_sit_config(dev); break; #endif -#if IS_ENABLED(CONFIG_NET_IPGRE) || IS_ENABLED(CONFIG_IPV6_GRE) - case ARPHRD_IP6GRE: +#if IS_ENABLED(CONFIG_NET_IPGRE) case ARPHRD_IPGRE: addrconf_gre_config(dev); break; @@ -4017,7 +4017,7 @@ restart: static void addrconf_rs_timer(struct timer_list *t) { - struct inet6_dev *idev = from_timer(idev, t, rs_timer); + struct inet6_dev *idev = timer_container_of(idev, t, rs_timer); struct net_device *dev = idev->dev; struct in6_addr lladdr; int rtr_solicits; diff --git a/net/ipv6/calipso.c b/net/ipv6/calipso.c index 62618a058b8f..a247bb93908b 100644 --- a/net/ipv6/calipso.c +++ b/net/ipv6/calipso.c @@ -1207,6 +1207,10 @@ static int calipso_req_setattr(struct request_sock *req, struct ipv6_opt_hdr *old, *new; struct sock *sk = sk_to_full_sk(req_to_sk(req)); + /* sk is NULL for SYN+ACK w/ SYN Cookie */ + if (!sk) + return -ENOMEM; + if (req_inet->ipv6_opt && req_inet->ipv6_opt->hopopt) old = req_inet->ipv6_opt->hopopt; else @@ -1247,6 +1251,10 @@ static void calipso_req_delattr(struct request_sock *req) struct ipv6_txoptions *txopts; struct sock *sk = sk_to_full_sk(req_to_sk(req)); + /* sk is NULL for SYN+ACK w/ SYN Cookie */ + if (!sk) + return; + if (!req_inet->ipv6_opt || !req_inet->ipv6_opt->hopopt) return; diff --git a/net/ipv6/ila/ila_common.c b/net/ipv6/ila/ila_common.c index 95e9146918cc..b8d43ed4689d 100644 --- a/net/ipv6/ila/ila_common.c +++ b/net/ipv6/ila/ila_common.c @@ -86,7 +86,7 @@ static void ila_csum_adjust_transport(struct sk_buff *skb, diff = get_csum_diff(ip6h, p); inet_proto_csum_replace_by_diff(&th->check, skb, - diff, true); + diff, true, true); } break; case NEXTHDR_UDP: @@ -97,7 +97,7 @@ static void ila_csum_adjust_transport(struct sk_buff *skb, if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) { diff = get_csum_diff(ip6h, p); inet_proto_csum_replace_by_diff(&uh->check, skb, - diff, true); + diff, true, true); if (!uh->check) uh->check = CSUM_MANGLED_0; } @@ -111,7 +111,7 @@ static void ila_csum_adjust_transport(struct sk_buff *skb, diff = get_csum_diff(ip6h, p); inet_proto_csum_replace_by_diff(&ih->icmp6_cksum, skb, - diff, true); + diff, true, true); } break; } diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index 7094d7708686..93578b2ec35f 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@ -2442,7 +2442,7 @@ void fib6_run_gc(unsigned long expires, struct net *net, bool force) static void fib6_gc_timer_cb(struct timer_list *t) { - struct net *arg = from_timer(arg, t, ipv6.ip6_fib_timer); + struct net *arg = timer_container_of(arg, t, ipv6.ip6_fib_timer); fib6_run_gc(0, arg, true); } diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 894d3158a6f0..a885bb5c98ea 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -1278,7 +1278,7 @@ route_lookup: ipv6h->nexthdr = proto; ipv6h->saddr = fl6->saddr; ipv6h->daddr = fl6->daddr; - ip6tunnel_xmit(NULL, skb, dev); + ip6tunnel_xmit(NULL, skb, dev, 0); return 0; tx_err_link_failure: DEV_STATS_INC(dev, tx_carrier_errors); diff --git a/net/ipv6/ip6_udp_tunnel.c b/net/ipv6/ip6_udp_tunnel.c index c99053189ea8..8ebe17a6058a 100644 --- a/net/ipv6/ip6_udp_tunnel.c +++ b/net/ipv6/ip6_udp_tunnel.c @@ -74,13 +74,14 @@ error: } EXPORT_SYMBOL_GPL(udp_sock_create6); -int 
udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sock *sk, - struct sk_buff *skb, - struct net_device *dev, - const struct in6_addr *saddr, - const struct in6_addr *daddr, - __u8 prio, __u8 ttl, __be32 label, - __be16 src_port, __be16 dst_port, bool nocheck) +void udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sock *sk, + struct sk_buff *skb, + struct net_device *dev, + const struct in6_addr *saddr, + const struct in6_addr *daddr, + __u8 prio, __u8 ttl, __be32 label, + __be16 src_port, __be16 dst_port, bool nocheck, + u16 ip6cb_flags) { struct udphdr *uh; struct ipv6hdr *ip6h; @@ -108,8 +109,7 @@ int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sock *sk, ip6h->daddr = *daddr; ip6h->saddr = *saddr; - ip6tunnel_xmit(sk, skb, dev); - return 0; + ip6tunnel_xmit(sk, skb, dev, ip6cb_flags); } EXPORT_SYMBOL_GPL(udp_tunnel6_xmit_skb); diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index 3276cde5ebd7..a35f4f1c6589 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@ -839,7 +839,7 @@ static void ipmr_do_expire_process(struct mr_table *mrt) static void ipmr_expire_process(struct timer_list *t) { - struct mr_table *mrt = from_timer(mrt, t, ipmr_expire_timer); + struct mr_table *mrt = timer_container_of(mrt, t, ipmr_expire_timer); if (!spin_trylock(&mfc_unres_lock)) { mod_timer(&mrt->ipmr_expire_timer, jiffies + 1); @@ -2035,8 +2035,8 @@ static inline int ip6mr_forward2_finish(struct net *net, struct sock *sk, struct * Processing handlers for ip6mr_forward */ -static int ip6mr_forward2(struct net *net, struct mr_table *mrt, - struct sk_buff *skb, int vifi) +static int ip6mr_prepare_xmit(struct net *net, struct mr_table *mrt, + struct sk_buff *skb, int vifi) { struct vif_device *vif = &mrt->vif_table[vifi]; struct net_device *vif_dev; @@ -2046,7 +2046,7 @@ static int ip6mr_forward2(struct net *net, struct mr_table *mrt, vif_dev = vif_dev_read(vif); if (!vif_dev) - goto out_free; + return -1; #ifdef CONFIG_IPV6_PIMSM_V2 if (vif->flags & MIFF_REGISTER) { @@ -2055,7 +2055,7 @@ static int ip6mr_forward2(struct net *net, struct mr_table *mrt, DEV_STATS_ADD(vif_dev, tx_bytes, skb->len); DEV_STATS_INC(vif_dev, tx_packets); ip6mr_cache_report(mrt, skb, vifi, MRT6MSG_WHOLEPKT); - goto out_free; + return -1; } #endif @@ -2069,7 +2069,7 @@ static int ip6mr_forward2(struct net *net, struct mr_table *mrt, dst = ip6_route_output(net, NULL, &fl6); if (dst->error) { dst_release(dst); - goto out_free; + return -1; } skb_dst_drop(skb); @@ -2093,20 +2093,43 @@ static int ip6mr_forward2(struct net *net, struct mr_table *mrt, /* We are about to write */ /* XXX: extension headers? 
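The ip6mr refactor above pulls the common transmit preparation out of ip6mr_forward2() into ip6mr_prepare_xmit(), which returns 0 on success and -1 when the caller should simply free the skb; its two thin callers, ip6mr_forward2() and the new ip6mr_output2(), follow just below. A generic sketch of that extract-and-reuse shape, with demo names rather than the kernel functions, is:

#include <stdio.h>

/* Demo packet; stands in for the sk_buff being prepared. */
struct demo_pkt { int ttl; };

/* Common preparation step: returns 0 on success, -1 if the packet should be
 * dropped by the caller (mirrors the ip6mr_prepare_xmit() return convention).
 */
static int demo_prepare_xmit(struct demo_pkt *p)
{
	if (p->ttl <= 1)
		return -1;
	p->ttl--;
	return 0;
}

/* Forwarding-path caller (shape of ip6mr_forward2()). */
static void demo_forward(struct demo_pkt *p)
{
	if (demo_prepare_xmit(p)) {
		printf("forward: dropped\n");
		return;
	}
	printf("forward: handed to the netfilter FORWARD hook, ttl=%d\n", p->ttl);
}

/* Locally-originated caller (shape of ip6mr_output2()). */
static void demo_output(struct demo_pkt *p)
{
	if (demo_prepare_xmit(p)) {
		printf("output: dropped\n");
		return;
	}
	printf("output: handed to the output path, ttl=%d\n", p->ttl);
}

int main(void)
{
	struct demo_pkt a = { .ttl = 64 }, b = { .ttl = 1 };

	demo_forward(&a);
	demo_output(&b);
	return 0;
}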
*/ if (skb_cow(skb, sizeof(*ipv6h) + LL_RESERVED_SPACE(vif_dev))) - goto out_free; + return -1; ipv6h = ipv6_hdr(skb); ipv6h->hop_limit--; + return 0; +} + +static void ip6mr_forward2(struct net *net, struct mr_table *mrt, + struct sk_buff *skb, int vifi) +{ + struct net_device *indev = skb->dev; + + if (ip6mr_prepare_xmit(net, mrt, skb, vifi)) + goto out_free; IP6CB(skb)->flags |= IP6SKB_FORWARDED; - return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, - net, NULL, skb, skb->dev, vif_dev, - ip6mr_forward2_finish); + NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, + net, NULL, skb, indev, skb->dev, + ip6mr_forward2_finish); + return; + +out_free: + kfree_skb(skb); +} + +static void ip6mr_output2(struct net *net, struct mr_table *mrt, + struct sk_buff *skb, int vifi) +{ + if (ip6mr_prepare_xmit(net, mrt, skb, vifi)) + goto out_free; + + ip6_output(net, NULL, skb); + return; out_free: kfree_skb(skb); - return 0; } /* Called with rcu_read_lock() */ @@ -2221,6 +2244,56 @@ dont_forward: kfree_skb(skb); } +/* Called under rcu_read_lock() */ +static void ip6_mr_output_finish(struct net *net, struct mr_table *mrt, + struct net_device *dev, struct sk_buff *skb, + struct mfc6_cache *c) +{ + int psend = -1; + int ct; + + WARN_ON_ONCE(!rcu_read_lock_held()); + + atomic_long_inc(&c->_c.mfc_un.res.pkt); + atomic_long_add(skb->len, &c->_c.mfc_un.res.bytes); + WRITE_ONCE(c->_c.mfc_un.res.lastuse, jiffies); + + /* Forward the frame */ + if (ipv6_addr_any(&c->mf6c_origin) && + ipv6_addr_any(&c->mf6c_mcastgrp)) { + if (ipv6_hdr(skb)->hop_limit > + c->_c.mfc_un.res.ttls[c->_c.mfc_parent]) { + /* It's an (*,*) entry and the packet is not coming from + * the upstream: forward the packet to the upstream + * only. + */ + psend = c->_c.mfc_parent; + goto last_forward; + } + goto dont_forward; + } + for (ct = c->_c.mfc_un.res.maxvif - 1; + ct >= c->_c.mfc_un.res.minvif; ct--) { + if (ipv6_hdr(skb)->hop_limit > c->_c.mfc_un.res.ttls[ct]) { + if (psend != -1) { + struct sk_buff *skb2; + + skb2 = skb_clone(skb, GFP_ATOMIC); + if (skb2) + ip6mr_output2(net, mrt, skb2, psend); + } + psend = ct; + } + } +last_forward: + if (psend != -1) { + ip6mr_output2(net, mrt, skb, psend); + return; + } + +dont_forward: + kfree_skb(skb); +} /* * Multicast packets for forwarding arrive here @@ -2288,6 +2361,61 @@ int ip6_mr_input(struct sk_buff *skb) return 0; } +int ip6_mr_output(struct net *net, struct sock *sk, struct sk_buff *skb) +{ + struct net_device *dev = skb_dst(skb)->dev; + struct flowi6 fl6 = (struct flowi6) { + .flowi6_iif = LOOPBACK_IFINDEX, + .flowi6_mark = skb->mark, + }; + struct mfc6_cache *cache; + struct mr_table *mrt; + int err; + int vif; + + WARN_ON_ONCE(!rcu_read_lock_held()); + + if (IP6CB(skb)->flags & IP6SKB_FORWARDED) + goto ip6_output; + if (!(IP6CB(skb)->flags & IP6SKB_MCROUTE)) + goto ip6_output; + + err = ip6mr_fib_lookup(net, &fl6, &mrt); + if (err < 0) { + kfree_skb(skb); + return err; + } + + cache = ip6mr_cache_find(mrt, + &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr); + if (!cache) { + vif = ip6mr_find_vif(mrt, dev); + if (vif >= 0) + cache = ip6mr_cache_find_any(mrt, + &ipv6_hdr(skb)->daddr, + vif); + } + + /* No usable cache entry */ + if (!cache) { + vif = ip6mr_find_vif(mrt, dev); + if (vif >= 0) + return ip6mr_cache_unresolved(mrt, vif, skb, dev); + goto ip6_output; + } + + /* Wrong interface */ + vif = cache->_c.mfc_parent; + if (rcu_access_pointer(mrt->vif_table[vif].dev) != dev) + goto ip6_output; + + ip6_mr_output_finish(net, mrt, dev, skb, cache); + return 0; + +ip6_output: + return ip6_output(net, sk, 
skb); +} + int ip6mr_get_route(struct net *net, struct sk_buff *skb, struct rtmsg *rtm, u32 portid) { diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c index d6bd8f7079bb..64ab23ff559b 100644 --- a/net/ipv6/netfilter/nf_conntrack_reasm.c +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c @@ -133,7 +133,7 @@ static inline u8 ip6_frag_ecn(const struct ipv6hdr *ipv6h) static void nf_ct_frag6_expire(struct timer_list *t) { - struct inet_frag_queue *frag = from_timer(frag, t, timer); + struct inet_frag_queue *frag = timer_container_of(frag, t, timer); struct frag_queue *fq; fq = container_of(frag, struct frag_queue, q); diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c index 49740898bc13..7d4bcf3fda5b 100644 --- a/net/ipv6/reassembly.c +++ b/net/ipv6/reassembly.c @@ -73,7 +73,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *skb, static void ip6_frag_expire(struct timer_list *t) { - struct inet_frag_queue *frag = from_timer(frag, t, timer); + struct inet_frag_queue *frag = timer_container_of(frag, t, timer); struct frag_queue *fq; fq = container_of(frag, struct frag_queue, q); diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 0143262094b0..df0caffefb38 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -1145,6 +1145,7 @@ static void ip6_rt_init_dst(struct rt6_info *rt, const struct fib6_result *res) rt->dst.input = ip6_input; } else if (ipv6_addr_type(&f6i->fib6_dst.addr) & IPV6_ADDR_MULTICAST) { rt->dst.input = ip6_mc_input; + rt->dst.output = ip6_mr_output; } else { rt->dst.input = ip6_forward; } @@ -3737,6 +3738,53 @@ void fib6_nh_release_dsts(struct fib6_nh *fib6_nh) } } +static int fib6_config_validate(struct fib6_config *cfg, + struct netlink_ext_ack *extack) +{ + /* RTF_PCPU is an internal flag; can not be set by userspace */ + if (cfg->fc_flags & RTF_PCPU) { + NL_SET_ERR_MSG(extack, "Userspace can not set RTF_PCPU"); + goto errout; + } + + /* RTF_CACHE is an internal flag; can not be set by userspace */ + if (cfg->fc_flags & RTF_CACHE) { + NL_SET_ERR_MSG(extack, "Userspace can not set RTF_CACHE"); + goto errout; + } + + if (cfg->fc_type > RTN_MAX) { + NL_SET_ERR_MSG(extack, "Invalid route type"); + goto errout; + } + + if (cfg->fc_dst_len > 128) { + NL_SET_ERR_MSG(extack, "Invalid prefix length"); + goto errout; + } + +#ifdef CONFIG_IPV6_SUBTREES + if (cfg->fc_src_len > 128) { + NL_SET_ERR_MSG(extack, "Invalid source address length"); + goto errout; + } + + if (cfg->fc_nh_id && cfg->fc_src_len) { + NL_SET_ERR_MSG(extack, "Nexthops can not be used with source routing"); + goto errout; + } +#else + if (cfg->fc_src_len) { + NL_SET_ERR_MSG(extack, + "Specifying source address requires IPV6_SUBTREES to be enabled"); + goto errout; + } +#endif + return 0; +errout: + return -EINVAL; +} + static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg, gfp_t gfp_flags, struct netlink_ext_ack *extack) @@ -3886,6 +3934,10 @@ int ip6_route_add(struct fib6_config *cfg, gfp_t gfp_flags, struct fib6_info *rt; int err; + err = fib6_config_validate(cfg, extack); + if (err) + return err; + rt = ip6_route_info_create(cfg, gfp_flags, extack); if (IS_ERR(rt)) return PTR_ERR(rt); @@ -4479,53 +4531,6 @@ void rt6_purge_dflt_routers(struct net *net) rcu_read_unlock(); } -static int fib6_config_validate(struct fib6_config *cfg, - struct netlink_ext_ack *extack) -{ - /* RTF_PCPU is an internal flag; can not be set by userspace */ - if (cfg->fc_flags & RTF_PCPU) { - NL_SET_ERR_MSG(extack, "Userspace can not set RTF_PCPU"); - 
goto errout; - } - - /* RTF_CACHE is an internal flag; can not be set by userspace */ - if (cfg->fc_flags & RTF_CACHE) { - NL_SET_ERR_MSG(extack, "Userspace can not set RTF_CACHE"); - goto errout; - } - - if (cfg->fc_type > RTN_MAX) { - NL_SET_ERR_MSG(extack, "Invalid route type"); - goto errout; - } - - if (cfg->fc_dst_len > 128) { - NL_SET_ERR_MSG(extack, "Invalid prefix length"); - goto errout; - } - -#ifdef CONFIG_IPV6_SUBTREES - if (cfg->fc_src_len > 128) { - NL_SET_ERR_MSG(extack, "Invalid source address length"); - goto errout; - } - - if (cfg->fc_nh_id && cfg->fc_src_len) { - NL_SET_ERR_MSG(extack, "Nexthops can not be used with source routing"); - goto errout; - } -#else - if (cfg->fc_src_len) { - NL_SET_ERR_MSG(extack, - "Specifying source address requires IPV6_SUBTREES to be enabled"); - goto errout; - } -#endif - return 0; -errout: - return -EINVAL; -} - static void rtmsg_to_fib6_config(struct net *net, struct in6_rtmsg *rtmsg, struct fib6_config *cfg) @@ -4563,10 +4568,6 @@ int ipv6_route_ioctl(struct net *net, unsigned int cmd, struct in6_rtmsg *rtmsg) switch (cmd) { case SIOCADDRT: - err = fib6_config_validate(&cfg, NULL); - if (err) - break; - /* Only do the default setting of fc_metric in route adding */ if (cfg.fc_metric == 0) cfg.fc_metric = IP6_RT_PRIO_USER; @@ -5402,6 +5403,10 @@ static int ip6_route_multipath_add(struct fib6_config *cfg, int nhn = 0; int err; + err = fib6_config_validate(cfg, extack); + if (err) + return err; + replace = (cfg->fc_nlinfo.nlh && (cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_REPLACE)); @@ -5636,10 +5641,6 @@ static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh, if (err < 0) return err; - err = fib6_config_validate(&cfg, extack); - if (err) - return err; - if (cfg.fc_metric == 0) cfg.fc_metric = IP6_RT_PRIO_USER; diff --git a/net/ipv6/seg6_local.c b/net/ipv6/seg6_local.c index ac1dbd492c22..dfa825ee870e 100644 --- a/net/ipv6/seg6_local.c +++ b/net/ipv6/seg6_local.c @@ -270,7 +270,7 @@ static void advance_nextseg(struct ipv6_sr_hdr *srh, struct in6_addr *daddr) static int seg6_lookup_any_nexthop(struct sk_buff *skb, struct in6_addr *nhaddr, - u32 tbl_id, bool local_delivery) + u32 tbl_id, bool local_delivery, int oif) { struct net *net = dev_net(skb->dev); struct ipv6hdr *hdr = ipv6_hdr(skb); @@ -282,6 +282,7 @@ seg6_lookup_any_nexthop(struct sk_buff *skb, struct in6_addr *nhaddr, memset(&fl6, 0, sizeof(fl6)); fl6.flowi6_iif = skb->dev->ifindex; + fl6.flowi6_oif = oif; fl6.daddr = nhaddr ? 
*nhaddr : hdr->daddr; fl6.saddr = hdr->saddr; fl6.flowlabel = ip6_flowinfo(hdr); @@ -291,17 +292,19 @@ seg6_lookup_any_nexthop(struct sk_buff *skb, struct in6_addr *nhaddr, if (nhaddr) fl6.flowi6_flags = FLOWI_FLAG_KNOWN_NH; - if (!tbl_id) { + if (!tbl_id && !oif) { dst = ip6_route_input_lookup(net, skb->dev, &fl6, skb, flags); - } else { + } else if (tbl_id) { struct fib6_table *table; table = fib6_get_table(net, tbl_id); if (!table) goto out; - rt = ip6_pol_route(net, table, 0, &fl6, skb, flags); + rt = ip6_pol_route(net, table, oif, &fl6, skb, flags); dst = &rt->dst; + } else { + dst = ip6_route_output(net, NULL, &fl6); } /* we want to discard traffic destined for local packet processing, @@ -330,7 +333,7 @@ out: int seg6_lookup_nexthop(struct sk_buff *skb, struct in6_addr *nhaddr, u32 tbl_id) { - return seg6_lookup_any_nexthop(skb, nhaddr, tbl_id, false); + return seg6_lookup_any_nexthop(skb, nhaddr, tbl_id, false, 0); } static __u8 seg6_flv_lcblock_octects(const struct seg6_flavors_info *finfo) @@ -418,7 +421,7 @@ static int end_next_csid_core(struct sk_buff *skb, struct seg6_local_lwt *slwt) static int input_action_end_x_finish(struct sk_buff *skb, struct seg6_local_lwt *slwt) { - seg6_lookup_nexthop(skb, &slwt->nh6, 0); + seg6_lookup_any_nexthop(skb, &slwt->nh6, 0, false, slwt->oif); return dst_input(skb); } @@ -1277,7 +1280,7 @@ static int input_action_end_dt6(struct sk_buff *skb, /* note: this time we do not need to specify the table because the VRF * takes care of selecting the correct table. */ - seg6_lookup_any_nexthop(skb, NULL, 0, true); + seg6_lookup_any_nexthop(skb, NULL, 0, true, 0); return dst_input(skb); @@ -1285,7 +1288,7 @@ legacy_mode: #endif skb_set_transport_header(skb, sizeof(struct ipv6hdr)); - seg6_lookup_any_nexthop(skb, NULL, slwt->table, true); + seg6_lookup_any_nexthop(skb, NULL, slwt->table, true, 0); return dst_input(skb); @@ -1477,7 +1480,8 @@ static struct seg6_action_desc seg6_action_table[] = { .action = SEG6_LOCAL_ACTION_END_X, .attrs = SEG6_F_ATTR(SEG6_LOCAL_NH6), .optattrs = SEG6_F_LOCAL_COUNTERS | - SEG6_F_LOCAL_FLAVORS, + SEG6_F_LOCAL_FLAVORS | + SEG6_F_ATTR(SEG6_LOCAL_OIF), .input = input_action_end_x, }, { @@ -1644,10 +1648,8 @@ static const struct nla_policy seg6_local_policy[SEG6_LOCAL_MAX + 1] = { [SEG6_LOCAL_SRH] = { .type = NLA_BINARY }, [SEG6_LOCAL_TABLE] = { .type = NLA_U32 }, [SEG6_LOCAL_VRFTABLE] = { .type = NLA_U32 }, - [SEG6_LOCAL_NH4] = { .type = NLA_BINARY, - .len = sizeof(struct in_addr) }, - [SEG6_LOCAL_NH6] = { .type = NLA_BINARY, - .len = sizeof(struct in6_addr) }, + [SEG6_LOCAL_NH4] = NLA_POLICY_EXACT_LEN(sizeof(struct in_addr)), + [SEG6_LOCAL_NH6] = NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr)), [SEG6_LOCAL_IIF] = { .type = NLA_U32 }, [SEG6_LOCAL_OIF] = { .type = NLA_U32 }, [SEG6_LOCAL_BPF] = { .type = NLA_NESTED }, diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index a72dbca9e8fc..12496ba1b7d4 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c @@ -1035,7 +1035,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, skb_set_inner_ipproto(skb, IPPROTO_IPV6); iptunnel_xmit(NULL, rt, skb, fl4.saddr, fl4.daddr, protocol, tos, ttl, - df, !net_eq(tunnel->net, dev_net(dev))); + df, !net_eq(tunnel->net, dev_net(dev)), 0); return NETDEV_TX_OK; tx_error_icmp: diff --git a/net/lapb/lapb_timer.c b/net/lapb/lapb_timer.c index 5b3f3b444d19..9fde6cf20f10 100644 --- a/net/lapb/lapb_timer.c +++ b/net/lapb/lapb_timer.c @@ -74,7 +74,7 @@ int lapb_t1timer_running(struct lapb_cb *lapb) static void lapb_t2timer_expiry(struct timer_list *t) { - 
struct lapb_cb *lapb = from_timer(lapb, t, t2timer); + struct lapb_cb *lapb = timer_container_of(lapb, t, t2timer); spin_lock_bh(&lapb->lock); if (timer_pending(&lapb->t2timer)) /* A new timer has been set up */ @@ -94,7 +94,7 @@ out: static void lapb_t1timer_expiry(struct timer_list *t) { - struct lapb_cb *lapb = from_timer(lapb, t, t1timer); + struct lapb_cb *lapb = timer_container_of(lapb, t, t1timer); spin_lock_bh(&lapb->lock); if (timer_pending(&lapb->t1timer)) /* A new timer has been set up */ diff --git a/net/llc/llc_c_ac.c b/net/llc/llc_c_ac.c index 7e8fc710c590..0779daa8aa8f 100644 --- a/net/llc/llc_c_ac.c +++ b/net/llc/llc_c_ac.c @@ -1335,28 +1335,31 @@ static void llc_conn_tmr_common_cb(struct sock *sk, u8 type) void llc_conn_pf_cycle_tmr_cb(struct timer_list *t) { - struct llc_sock *llc = from_timer(llc, t, pf_cycle_timer.timer); + struct llc_sock *llc = timer_container_of(llc, t, + pf_cycle_timer.timer); llc_conn_tmr_common_cb(&llc->sk, LLC_CONN_EV_TYPE_P_TMR); } void llc_conn_busy_tmr_cb(struct timer_list *t) { - struct llc_sock *llc = from_timer(llc, t, busy_state_timer.timer); + struct llc_sock *llc = timer_container_of(llc, t, + busy_state_timer.timer); llc_conn_tmr_common_cb(&llc->sk, LLC_CONN_EV_TYPE_BUSY_TMR); } void llc_conn_ack_tmr_cb(struct timer_list *t) { - struct llc_sock *llc = from_timer(llc, t, ack_timer.timer); + struct llc_sock *llc = timer_container_of(llc, t, ack_timer.timer); llc_conn_tmr_common_cb(&llc->sk, LLC_CONN_EV_TYPE_ACK_TMR); } void llc_conn_rej_tmr_cb(struct timer_list *t) { - struct llc_sock *llc = from_timer(llc, t, rej_sent_timer.timer); + struct llc_sock *llc = timer_container_of(llc, t, + rej_sent_timer.timer); llc_conn_tmr_common_cb(&llc->sk, LLC_CONN_EV_TYPE_REJ_TMR); } diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c index 85612234742a..e38f46ffebfa 100644 --- a/net/mac80211/agg-rx.c +++ b/net/mac80211/agg-rx.c @@ -143,7 +143,8 @@ EXPORT_SYMBOL(ieee80211_stop_rx_ba_session); */ static void sta_rx_agg_session_timer_expired(struct timer_list *t) { - struct tid_ampdu_rx *tid_rx = from_timer(tid_rx, t, session_timer); + struct tid_ampdu_rx *tid_rx = timer_container_of(tid_rx, t, + session_timer); struct sta_info *sta = tid_rx->sta; u8 tid = tid_rx->tid; unsigned long timeout; @@ -163,7 +164,8 @@ static void sta_rx_agg_session_timer_expired(struct timer_list *t) static void sta_rx_agg_reorder_timer_expired(struct timer_list *t) { - struct tid_ampdu_rx *tid_rx = from_timer(tid_rx, t, reorder_timer); + struct tid_ampdu_rx *tid_rx = timer_container_of(tid_rx, t, + reorder_timer); rcu_read_lock(); ieee80211_release_reorder_timeout(tid_rx->sta, tid_rx->tid); @@ -297,7 +299,8 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta, if (!sta->sta.valid_links && !sta->sta.deflink.ht_cap.ht_supported && - !sta->sta.deflink.he_cap.has_he) { + !sta->sta.deflink.he_cap.has_he && + !sta->sta.deflink.s1g_cap.s1g) { ht_dbg(sta->sdata, "STA %pM erroneously requests BA session on tid %d w/o HT\n", sta->sta.addr, tid); @@ -325,7 +328,8 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta, /* XXX: check own ht delayed BA capability?? 
*/ if (((ba_policy != 1) && (sta->sta.valid_links || - !(sta->sta.deflink.ht_cap.cap & IEEE80211_HT_CAP_DELAY_BA))) || + !(sta->sta.deflink.ht_cap.cap & IEEE80211_HT_CAP_DELAY_BA) || + !(sta->sta.deflink.s1g_cap.cap[3] & S1G_CAP3_HT_DELAYED_BA))) || (buf_size > max_buf_size)) { status = WLAN_STATUS_INVALID_QOS_PARAM; ht_dbg_ratelimited(sta->sdata, diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c index 8dc8c3c96b96..d981b0fc57bf 100644 --- a/net/mac80211/agg-tx.c +++ b/net/mac80211/agg-tx.c @@ -422,7 +422,8 @@ int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, */ static void sta_addba_resp_timer_expired(struct timer_list *t) { - struct tid_ampdu_tx *tid_tx = from_timer(tid_tx, t, addba_resp_timer); + struct tid_ampdu_tx *tid_tx = timer_container_of(tid_tx, t, + addba_resp_timer); struct sta_info *sta = tid_tx->sta; u8 tid = tid_tx->tid; @@ -574,7 +575,8 @@ EXPORT_SYMBOL(ieee80211_refresh_tx_agg_session_timer); */ static void sta_tx_agg_session_timer_expired(struct timer_list *t) { - struct tid_ampdu_tx *tid_tx = from_timer(tid_tx, t, session_timer); + struct tid_ampdu_tx *tid_tx = timer_container_of(tid_tx, t, + session_timer); struct sta_info *sta = tid_tx->sta; u8 tid = tid_tx->tid; unsigned long timeout; @@ -614,7 +616,8 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid, !pubsta->deflink.ht_cap.ht_supported && !pubsta->deflink.vht_cap.vht_supported && !pubsta->deflink.he_cap.has_he && - !pubsta->deflink.eht_cap.has_eht) + !pubsta->deflink.eht_cap.has_eht && + !pubsta->deflink.s1g_cap.s1g) return -EINVAL; if (WARN_ON_ONCE(!local->ops->ampdu_action)) diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index d9d88f2f2831..56540c3701ed 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -178,6 +178,7 @@ static int ieee80211_set_ap_mbssid_options(struct ieee80211_sub_if_data *sdata, link_conf->nontransmitted = true; link_conf->bssid_index = params->index; + link_conf->bssid_indicator = tx_bss_conf->bssid_indicator; } if (params->ema) link_conf->ema_ap = true; @@ -885,6 +886,13 @@ static int ieee80211_dump_station(struct wiphy *wiphy, struct net_device *dev, ret = 0; memcpy(mac, sta->sta.addr, ETH_ALEN); sta_set_sinfo(sta, sinfo, true); + + /* Add accumulated removed link data to sinfo data for + * consistency for MLO + */ + if (sinfo->valid_links) + sta_set_accumulated_removed_links_sinfo(sta, sinfo); + } return ret; @@ -912,6 +920,12 @@ static int ieee80211_get_station(struct wiphy *wiphy, struct net_device *dev, if (sta) { ret = 0; sta_set_sinfo(sta, sinfo, true); + + /* Add accumulated removed link data to sinfo data for + * consistency for MLO + */ + if (sinfo->valid_links) + sta_set_accumulated_removed_links_sinfo(sta, sinfo); } return ret; @@ -1218,8 +1232,11 @@ ieee80211_assign_beacon(struct ieee80211_sub_if_data *sdata, ieee80211_copy_rnr_beacon(pos, new->rnr_ies, rnr); } /* update bssid_indicator */ - link_conf->bssid_indicator = - ilog2(__roundup_pow_of_two(mbssid->cnt + 1)); + if (new->mbssid_ies->cnt && new->mbssid_ies->elem[0].len > 2) + link_conf->bssid_indicator = + *(new->mbssid_ies->elem[0].data + 2); + else + link_conf->bssid_indicator = 0; } if (csa) { @@ -1878,6 +1895,7 @@ static int sta_link_apply_parameters(struct ieee80211_local *local, params->vht_capa || params->he_capa || params->eht_capa || + params->s1g_capa || params->opmode_notif_used; switch (mode) { @@ -1956,6 +1974,10 @@ static int sta_link_apply_parameters(struct ieee80211_local *local, params->eht_capa_len, link_sta); + if (params->s1g_capa) + 
ieee80211_s1g_cap_to_sta_s1g_cap(sdata, params->s1g_capa, + link_sta); + ieee80211_sta_init_nss(link_sta); if (params->opmode_notif_used) { @@ -3028,7 +3050,8 @@ static int ieee80211_set_mcast_rate(struct wiphy *wiphy, struct net_device *dev, return 0; } -static int ieee80211_set_wiphy_params(struct wiphy *wiphy, u32 changed) +static int ieee80211_set_wiphy_params(struct wiphy *wiphy, int radio_idx, + u32 changed) { struct ieee80211_local *local = wiphy_priv(wiphy); int err; @@ -3036,7 +3059,8 @@ static int ieee80211_set_wiphy_params(struct wiphy *wiphy, u32 changed) if (changed & WIPHY_PARAM_FRAG_THRESHOLD) { ieee80211_check_fast_xmit_all(local); - err = drv_set_frag_threshold(local, wiphy->frag_threshold); + err = drv_set_frag_threshold(local, radio_idx, + wiphy->frag_threshold); if (err) { ieee80211_check_fast_xmit_all(local); @@ -3050,14 +3074,23 @@ static int ieee80211_set_wiphy_params(struct wiphy *wiphy, u32 changed) coverage_class = changed & WIPHY_PARAM_COVERAGE_CLASS ? wiphy->coverage_class : -1; - err = drv_set_coverage_class(local, coverage_class); + err = drv_set_coverage_class(local, radio_idx, + coverage_class); if (err) return err; } if (changed & WIPHY_PARAM_RTS_THRESHOLD) { - err = drv_set_rts_threshold(local, wiphy->rts_threshold); + u32 rts_threshold; + + if ((radio_idx == -1) || (radio_idx >= wiphy->n_radio)) + rts_threshold = wiphy->rts_threshold; + else + rts_threshold = + wiphy->radio_cfg[radio_idx].rts_threshold; + + err = drv_set_rts_threshold(local, radio_idx, rts_threshold); if (err) return err; @@ -3075,18 +3108,19 @@ static int ieee80211_set_wiphy_params(struct wiphy *wiphy, u32 changed) } if (changed & (WIPHY_PARAM_RETRY_SHORT | WIPHY_PARAM_RETRY_LONG)) - ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_RETRY_LIMITS); + ieee80211_hw_config(local, radio_idx, + IEEE80211_CONF_CHANGE_RETRY_LIMITS); if (changed & (WIPHY_PARAM_TXQ_LIMIT | WIPHY_PARAM_TXQ_MEMORY_LIMIT | WIPHY_PARAM_TXQ_QUANTUM)) - ieee80211_txq_set_params(local); + ieee80211_txq_set_params(local, radio_idx); return 0; } static int ieee80211_set_tx_power(struct wiphy *wiphy, - struct wireless_dev *wdev, + struct wireless_dev *wdev, int radio_idx, enum nl80211_tx_power_setting type, int mbm) { struct ieee80211_local *local = wiphy_priv(wiphy); @@ -3214,6 +3248,7 @@ static int ieee80211_set_tx_power(struct wiphy *wiphy, static int ieee80211_get_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev, + int radio_idx, unsigned int link_id, int *dbm) { @@ -3392,7 +3427,7 @@ static int ieee80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev, } if (ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS)) - ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); + ieee80211_hw_config(local, -1, IEEE80211_CONF_CHANGE_PS); ieee80211_recalc_ps(local); ieee80211_recalc_ps_vif(sdata); @@ -3549,6 +3584,56 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy, return 0; } +static bool ieee80211_is_scan_ongoing(struct wiphy *wiphy, + struct ieee80211_local *local, + struct cfg80211_chan_def *chandef) +{ + struct cfg80211_scan_request *scan_req; + int chan_radio_idx, req_radio_idx; + struct ieee80211_roc_work *roc; + + if (list_empty(&local->roc_list) && !local->scanning) + return false; + + if (wiphy->n_radio < 2) + return true; + + req_radio_idx = cfg80211_get_radio_idx_by_chan(wiphy, chandef->chan); + if (req_radio_idx < 0) + return true; + + if (local->scanning) { + scan_req = wiphy_dereference(wiphy, local->scan_req); + /* + * Scan is going on but info is not there. 
Should not happen + * but if it does, let's not take risk and assume we can't use + * the hw hence return true + */ + if (WARN_ON_ONCE(!scan_req)) + return true; + + return ieee80211_is_radio_idx_in_scan_req(wiphy, scan_req, + req_radio_idx); + } + + list_for_each_entry(roc, &local->roc_list, list) { + chan_radio_idx = cfg80211_get_radio_idx_by_chan(wiphy, + roc->chan); + /* + * The roc work is added but chan_radio_idx is invalid. + * Should not happen but if it does, let's not take + * risk and return true. + */ + if (chan_radio_idx < 0) + return true; + + if (chan_radio_idx == req_radio_idx) + return true; + } + + return false; +} + static int ieee80211_start_radar_detection(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_chan_def *chandef, @@ -3562,7 +3647,7 @@ static int ieee80211_start_radar_detection(struct wiphy *wiphy, lockdep_assert_wiphy(local->hw.wiphy); - if (!list_empty(&local->roc_list) || local->scanning) + if (ieee80211_is_scan_ongoing(wiphy, local, chandef)) return -EBUSY; link_data = sdata_dereference(sdata->link[link_id], sdata); @@ -4054,7 +4139,7 @@ __ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev, lockdep_assert_wiphy(local->hw.wiphy); - if (!list_empty(&local->roc_list) || local->scanning) + if (ieee80211_is_scan_ongoing(wiphy, local, ¶ms->chandef)) return -EBUSY; if (sdata->wdev.links[link_id].cac_started) @@ -4238,7 +4323,8 @@ ieee80211_update_mgmt_frame_registrations(struct wiphy *wiphy, ieee80211_configure_filter(local); } -static int ieee80211_set_antenna(struct wiphy *wiphy, u32 tx_ant, u32 rx_ant) +static int ieee80211_set_antenna(struct wiphy *wiphy, int radio_idx, + u32 tx_ant, u32 rx_ant) { struct ieee80211_local *local = wiphy_priv(wiphy); int ret; @@ -4254,11 +4340,12 @@ static int ieee80211_set_antenna(struct wiphy *wiphy, u32 tx_ant, u32 rx_ant) return 0; } -static int ieee80211_get_antenna(struct wiphy *wiphy, u32 *tx_ant, u32 *rx_ant) +static int ieee80211_get_antenna(struct wiphy *wiphy, int radio_idx, + u32 *tx_ant, u32 *rx_ant) { struct ieee80211_local *local = wiphy_priv(wiphy); - return drv_get_antenna(local, tx_ant, rx_ant); + return drv_get_antenna(local, radio_idx, tx_ant, rx_ant); } static int ieee80211_set_rekey_data(struct wiphy *wiphy, diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c index 3aaf5abf1acc..4bcbcf9d98b5 100644 --- a/net/mac80211/chan.c +++ b/net/mac80211/chan.c @@ -644,15 +644,39 @@ ieee80211_find_chanctx(struct ieee80211_local *local, return NULL; } -bool ieee80211_is_radar_required(struct ieee80211_local *local) +bool ieee80211_is_radar_required(struct ieee80211_local *local, + struct cfg80211_scan_request *req) { + struct wiphy *wiphy = local->hw.wiphy; struct ieee80211_link_data *link; + struct ieee80211_channel *chan; + int radio_idx; lockdep_assert_wiphy(local->hw.wiphy); + if (!req) + return false; + for_each_sdata_link(local, link) { - if (link->radar_required) - return true; + if (link->radar_required) { + if (wiphy->n_radio < 2) + return true; + + chan = link->conf->chanreq.oper.chan; + radio_idx = cfg80211_get_radio_idx_by_chan(wiphy, chan); + /* + * The radio index (radio_idx) is expected to be valid, + * as it's derived from a channel tied to a link. If + * it's invalid (i.e., negative), return true to avoid + * potential issues with radar-sensitive operations. 
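ieee80211_is_scan_ongoing() and the reworked ieee80211_is_radar_required() above only treat a scan, ROC or radar requirement as conflicting when it resolves to the same radio as the request, and conservatively assume a conflict whenever a radio index cannot be determined. A standalone sketch of that comparison, as a demo helper rather than a mac80211 API, is:

#include <stdbool.h>
#include <stdio.h>

/* Demo-only: -1 stands for "radio index could not be determined", mirroring
 * a negative return from cfg80211_get_radio_idx_by_chan().
 */
static bool demo_radios_conflict(int n_radio, int busy_idx, int req_idx)
{
	if (n_radio < 2)
		return true;		/* single radio: any activity conflicts */

	if (busy_idx < 0 || req_idx < 0)
		return true;		/* unknown index: be conservative */

	return busy_idx == req_idx;	/* conflict only on the same radio */
}

int main(void)
{
	printf("%d\n", demo_radios_conflict(2, 0, 1));	/* 0: different radios */
	printf("%d\n", demo_radios_conflict(2, -1, 1));	/* 1: unknown, assume busy */
	printf("%d\n", demo_radios_conflict(1, 0, 0));	/* 1: single radio */
	return 0;
}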
+ */ + if (radio_idx < 0) + return true; + + if (ieee80211_is_radio_idx_in_scan_req(wiphy, req, + radio_idx)) + return true; + } } return false; @@ -720,7 +744,7 @@ static int ieee80211_add_chanctx(struct ieee80211_local *local, /* turn idle off *before* setting channel -- some drivers need that */ changed = ieee80211_idle_off(local); if (changed) - ieee80211_hw_config(local, changed); + ieee80211_hw_config(local, -1, changed); err = drv_add_chanctx(local, ctx); if (err) { @@ -1381,6 +1405,7 @@ ieee80211_link_use_reserved_reassign(struct ieee80211_link_data *link) goto out; } + link->radar_required = link->reserved_radar_required; list_move(&link->assigned_chanctx_list, &new_ctx->assigned_links); rcu_assign_pointer(link_conf->chanctx_conf, &new_ctx->conf); diff --git a/net/mac80211/debug.h b/net/mac80211/debug.h index 5b81998cb0c9..ef7c1a68d88d 100644 --- a/net/mac80211/debug.h +++ b/net/mac80211/debug.h @@ -1,10 +1,11 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* * Portions - * Copyright (C) 2022 - 2024 Intel Corporation + * Copyright (C) 2022 - 2025 Intel Corporation */ #ifndef __MAC80211_DEBUG_H #define __MAC80211_DEBUG_H +#include <linux/once_lite.h> #include <net/cfg80211.h> #ifdef CONFIG_MAC80211_OCB_DEBUG @@ -152,6 +153,8 @@ do { \ else \ _sdata_err((link)->sdata, fmt, ##__VA_ARGS__); \ } while (0) +#define link_err_once(link, fmt, ...) \ + DO_ONCE_LITE(link_err, link, fmt, ##__VA_ARGS__) #define link_id_info(sdata, link_id, fmt, ...) \ do { \ if (ieee80211_vif_is_mld(&sdata->vif)) \ diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h index 307587c8a003..8baebb5636ec 100644 --- a/net/mac80211/driver-ops.h +++ b/net/mac80211/driver-ops.h @@ -143,15 +143,16 @@ int drv_change_interface(struct ieee80211_local *local, void drv_remove_interface(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata); -static inline int drv_config(struct ieee80211_local *local, u32 changed) +static inline int drv_config(struct ieee80211_local *local, int radio_idx, + u32 changed) { int ret; might_sleep(); lockdep_assert_wiphy(local->hw.wiphy); - trace_drv_config(local, changed); - ret = local->ops->config(&local->hw, changed); + trace_drv_config(local, radio_idx, changed); + ret = local->ops->config(&local->hw, radio_idx, changed); trace_drv_return_int(local, ret); return ret; } @@ -387,45 +388,47 @@ static inline void drv_get_key_seq(struct ieee80211_local *local, } static inline int drv_set_frag_threshold(struct ieee80211_local *local, - u32 value) + int radio_idx, u32 value) { int ret = 0; might_sleep(); lockdep_assert_wiphy(local->hw.wiphy); - trace_drv_set_frag_threshold(local, value); + trace_drv_set_frag_threshold(local, radio_idx, value); if (local->ops->set_frag_threshold) - ret = local->ops->set_frag_threshold(&local->hw, value); + ret = local->ops->set_frag_threshold(&local->hw, radio_idx, + value); trace_drv_return_int(local, ret); return ret; } static inline int drv_set_rts_threshold(struct ieee80211_local *local, - u32 value) + int radio_idx, u32 value) { int ret = 0; might_sleep(); lockdep_assert_wiphy(local->hw.wiphy); - trace_drv_set_rts_threshold(local, value); + trace_drv_set_rts_threshold(local, radio_idx, value); if (local->ops->set_rts_threshold) - ret = local->ops->set_rts_threshold(&local->hw, value); + ret = local->ops->set_rts_threshold(&local->hw, radio_idx, + value); trace_drv_return_int(local, ret); return ret; } static inline int drv_set_coverage_class(struct ieee80211_local *local, - s16 value) + int radio_idx, s16 value) { int ret = 0; 
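The drv_*() wrappers being extended with radio_idx all share one shape: assert locking, trace, call the driver op only if it exists, otherwise use a default result. A skeletal userspace rendering of that optional-callback pattern; the struct and names here are stand-ins, and the -EOPNOTSUPP fallback mirrors wrappers such as drv_set_coverage_class() and drv_get_antenna() (drv_set_rts_threshold() instead defaults to 0):

#include <stdio.h>

#define EOPNOTSUPP 95   /* stand-in for the kernel errno value */

struct driver_ops {
	/* optional callback; NULL when the driver does not implement it */
	int (*set_value)(void *hw, int radio_idx, unsigned int value);
};

static int drv_set_value(const struct driver_ops *ops, void *hw,
			 int radio_idx, unsigned int value)
{
	if (!ops->set_value)
		return -EOPNOTSUPP;
	return ops->set_value(hw, radio_idx, value);
}

static int demo_set_value(void *hw, int radio_idx, unsigned int value)
{
	(void)hw;
	printf("radio %d -> %u\n", radio_idx, value);
	return 0;
}

int main(void)
{
	struct driver_ops with = { .set_value = demo_set_value };
	struct driver_ops without = { 0 };

	printf("%d\n", drv_set_value(&with, NULL, -1, 2347));    /* 0 */
	printf("%d\n", drv_set_value(&without, NULL, -1, 2347)); /* -95 */
	return 0;
}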
might_sleep(); lockdep_assert_wiphy(local->hw.wiphy); - trace_drv_set_coverage_class(local, value); + trace_drv_set_coverage_class(local, radio_idx, value); if (local->ops->set_coverage_class) - local->ops->set_coverage_class(&local->hw, value); + local->ops->set_coverage_class(&local->hw, radio_idx, value); else ret = -EOPNOTSUPP; @@ -631,6 +634,25 @@ static inline void drv_sta_statistics(struct ieee80211_local *local, trace_drv_return_void(local); } +static inline void drv_link_sta_statistics(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_link_sta *link_sta, + struct link_station_info *link_sinfo) +{ + might_sleep(); + lockdep_assert_wiphy(local->hw.wiphy); + + sdata = get_bss_sdata(sdata); + if (!check_sdata_in_driver(sdata)) + return; + + trace_drv_link_sta_statistics(local, sdata, link_sta); + if (local->ops->link_sta_statistics) + local->ops->link_sta_statistics(&local->hw, &sdata->vif, + link_sta, link_sinfo); + trace_drv_return_void(local); +} + int drv_conf_tx(struct ieee80211_local *local, struct ieee80211_link_data *link, u16 ac, const struct ieee80211_tx_queue_params *params); @@ -753,20 +775,21 @@ static inline int drv_set_antenna(struct ieee80211_local *local, might_sleep(); lockdep_assert_wiphy(local->hw.wiphy); if (local->ops->set_antenna) - ret = local->ops->set_antenna(&local->hw, tx_ant, rx_ant); + ret = local->ops->set_antenna(&local->hw, -1, tx_ant, rx_ant); trace_drv_set_antenna(local, tx_ant, rx_ant, ret); return ret; } -static inline int drv_get_antenna(struct ieee80211_local *local, +static inline int drv_get_antenna(struct ieee80211_local *local, int radio_idx, u32 *tx_ant, u32 *rx_ant) { int ret = -EOPNOTSUPP; might_sleep(); lockdep_assert_wiphy(local->hw.wiphy); if (local->ops->get_antenna) - ret = local->ops->get_antenna(&local->hw, tx_ant, rx_ant); - trace_drv_get_antenna(local, *tx_ant, *rx_ant, ret); + ret = local->ops->get_antenna(&local->hw, radio_idx, + tx_ant, rx_ant); + trace_drv_get_antenna(local, radio_idx, *tx_ant, *rx_ant, ret); return ret; } diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index a6e7b7ba6a01..6e36b09fe97f 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c @@ -635,7 +635,7 @@ static int ieee80211_sta_active_ibss(struct ieee80211_sub_if_data *sdata) rcu_read_lock(); list_for_each_entry_rcu(sta, &local->sta_list, list) { - unsigned long last_active = ieee80211_sta_last_active(sta); + unsigned long last_active = ieee80211_sta_last_active(sta, -1); if (sta->sdata == sdata && time_is_after_jiffies(last_active + @@ -1228,7 +1228,7 @@ static void ieee80211_ibss_sta_expire(struct ieee80211_sub_if_data *sdata) lockdep_assert_wiphy(local->hw.wiphy); list_for_each_entry_safe(sta, tmp, &local->sta_list, list) { - unsigned long last_active = ieee80211_sta_last_active(sta); + unsigned long last_active = ieee80211_sta_last_active(sta, -1); if (sdata != sta->sdata) continue; @@ -1673,7 +1673,7 @@ void ieee80211_ibss_work(struct ieee80211_sub_if_data *sdata) static void ieee80211_ibss_timer(struct timer_list *t) { struct ieee80211_sub_if_data *sdata = - from_timer(sdata, t, u.ibss.timer); + timer_container_of(sdata, t, u.ibss.timer); wiphy_work_queue(sdata->local->hw.wiphy, &sdata->work); } diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 30809f0b35f7..4ef7b3656aca 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -1872,7 +1872,8 @@ u64 ieee80211_calculate_rx_timestamp(struct ieee80211_local *local, struct ieee80211_rx_status *status, unsigned 
int mpdu_len, unsigned int mpdu_offset); -int ieee80211_hw_config(struct ieee80211_local *local, u32 changed); +int ieee80211_hw_config(struct ieee80211_local *local, int radio_idx, + u32 changed); int ieee80211_hw_conf_chan(struct ieee80211_local *local); void ieee80211_hw_conf_init(struct ieee80211_local *local); void ieee80211_tx_set_protected(struct ieee80211_tx_data *tx); @@ -2269,6 +2270,9 @@ void ieee80211_s1g_rx_twt_action(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb); void ieee80211_s1g_status_twt_action(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb); +void ieee80211_s1g_cap_to_sta_s1g_cap(struct ieee80211_sub_if_data *sdata, + const struct ieee80211_s1g_cap *s1g_cap_ie, + struct link_sta_info *link_sta); /* Spectrum management */ void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata, @@ -2542,7 +2546,7 @@ static inline bool ieee80211_can_run_worker(struct ieee80211_local *local) } int ieee80211_txq_setup_flows(struct ieee80211_local *local); -void ieee80211_txq_set_params(struct ieee80211_local *local); +void ieee80211_txq_set_params(struct ieee80211_local *local, int radio_idx); void ieee80211_txq_teardown_flows(struct ieee80211_local *local); void ieee80211_txq_init(struct ieee80211_sub_if_data *sdata, struct sta_info *sta, @@ -2712,7 +2716,11 @@ void ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local, struct ieee80211_chanctx *ctx, struct ieee80211_link_data *rsvd_for, bool check_reserved); -bool ieee80211_is_radar_required(struct ieee80211_local *local); +bool ieee80211_is_radar_required(struct ieee80211_local *local, + struct cfg80211_scan_request *req); +bool ieee80211_is_radio_idx_in_scan_req(struct wiphy *wiphy, + struct cfg80211_scan_request *scan_req, + int radio_idx); void ieee80211_dfs_cac_timer_work(struct wiphy *wiphy, struct wiphy_work *work); void ieee80211_dfs_cac_cancel(struct ieee80211_local *local, diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 7c27f3cd841c..7b2baebb8644 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -146,7 +146,7 @@ void ieee80211_recalc_idle(struct ieee80211_local *local) { u32 change = __ieee80211_recalc_idle(local, false); if (change) - ieee80211_hw_config(local, change); + ieee80211_hw_config(local, -1, change); } static int ieee80211_verify_mac(struct ieee80211_sub_if_data *sdata, u8 *addr, @@ -726,7 +726,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_do /* do after stop to avoid reconfiguring when we stop anyway */ ieee80211_configure_filter(local); - ieee80211_hw_config(local, hw_reconf_flags); + ieee80211_hw_config(local, -1, hw_reconf_flags); if (local->virt_monitors == local->open_count) ieee80211_add_virtual_monitor(local); @@ -1491,7 +1491,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up) if (local->open_count == 1) ieee80211_hw_conf_init(local); else if (hw_reconf_flags) - ieee80211_hw_config(local, hw_reconf_flags); + ieee80211_hw_config(local, -1, hw_reconf_flags); ieee80211_recalc_ps(local); diff --git a/net/mac80211/led.c b/net/mac80211/led.c index 885fa6aa3fc1..fabbffdd3ac2 100644 --- a/net/mac80211/led.c +++ b/net/mac80211/led.c @@ -257,7 +257,8 @@ static unsigned long tpt_trig_traffic(struct ieee80211_local *local, static void tpt_trig_timer(struct timer_list *t) { - struct tpt_led_trigger *tpt_trig = from_timer(tpt_trig, t, timer); + struct tpt_led_trigger *tpt_trig = timer_container_of(tpt_trig, t, + timer); struct ieee80211_local *local = tpt_trig->local; unsigned long on, off, 
tpt; int i; diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 6b6de43d9420..c1c758e76d2e 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -190,7 +190,8 @@ static u32 ieee80211_calc_hw_conf_chan(struct ieee80211_local *local, return changed; } -int ieee80211_hw_config(struct ieee80211_local *local, u32 changed) +int ieee80211_hw_config(struct ieee80211_local *local, int radio_idx, + u32 changed) { int ret = 0; @@ -201,7 +202,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed) IEEE80211_CONF_CHANGE_SMPS)); if (changed && local->open_count) { - ret = drv_config(local, changed); + ret = drv_config(local, radio_idx, changed); /* * Goal: * HW reconfiguration should never fail, the driver has told @@ -235,7 +236,7 @@ static int _ieee80211_hw_conf_chan(struct ieee80211_local *local, if (!changed) return 0; - return drv_config(local, changed); + return drv_config(local, -1, changed); } int ieee80211_hw_conf_chan(struct ieee80211_local *local) @@ -269,7 +270,7 @@ void ieee80211_hw_conf_init(struct ieee80211_local *local) ctx ? &ctx->conf : NULL); } - WARN_ON(drv_config(local, changed)); + WARN_ON(drv_config(local, -1, changed)); } int ieee80211_emulate_add_chanctx(struct ieee80211_hw *hw, diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index 5cc56d578048..d00d9d413c5c 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c @@ -40,7 +40,7 @@ void ieee80211s_stop(void) static void ieee80211_mesh_housekeeping_timer(struct timer_list *t) { struct ieee80211_sub_if_data *sdata = - from_timer(sdata, t, u.mesh.housekeeping_timer); + timer_container_of(sdata, t, u.mesh.housekeeping_timer); struct ieee80211_local *local = sdata->local; struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; @@ -684,7 +684,7 @@ int mesh_add_eht_oper_ie(struct ieee80211_sub_if_data *sdata, struct sk_buff *sk static void ieee80211_mesh_path_timer(struct timer_list *t) { struct ieee80211_sub_if_data *sdata = - from_timer(sdata, t, u.mesh.mesh_path_timer); + timer_container_of(sdata, t, u.mesh.mesh_path_timer); wiphy_work_queue(sdata->local->hw.wiphy, &sdata->work); } @@ -692,7 +692,7 @@ static void ieee80211_mesh_path_timer(struct timer_list *t) static void ieee80211_mesh_path_root_timer(struct timer_list *t) { struct ieee80211_sub_if_data *sdata = - from_timer(sdata, t, u.mesh.mesh_path_root_timer); + timer_container_of(sdata, t, u.mesh.mesh_path_root_timer); struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; set_bit(MESH_WORK_ROOT, &ifmsh->wrkq_flags); diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c index 91444301a84a..9101858525dd 100644 --- a/net/mac80211/mesh_hwmp.c +++ b/net/mac80211/mesh_hwmp.c @@ -1292,7 +1292,7 @@ int mesh_nexthop_lookup(struct ieee80211_sub_if_data *sdata, void mesh_path_timer(struct timer_list *t) { - struct mesh_path *mpath = from_timer(mpath, t, timer); + struct mesh_path *mpath = timer_container_of(mpath, t, timer); struct ieee80211_sub_if_data *sdata = mpath->sdata; int ret; diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c index 9c6a2b342170..cb45a5d2009d 100644 --- a/net/mac80211/mesh_plink.c +++ b/net/mac80211/mesh_plink.c @@ -653,7 +653,7 @@ out: void mesh_plink_timer(struct timer_list *t) { - struct mesh_sta *mesh = from_timer(mesh, t, plink_timer); + struct mesh_sta *mesh = timer_container_of(mesh, t, plink_timer); struct sta_info *sta; u16 reason = 0; struct ieee80211_sub_if_data *sdata; diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index b84150dbfe8c..6001c8897d7c 100644 --- a/net/mac80211/mlme.c +++ 
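The many from_timer() -> timer_container_of() conversions in this series are mechanical: both helpers recover the structure embedding the timer via the container_of()/offsetof() idiom. A small plain-C illustration of that idiom (userspace, not kernel code):

#include <stddef.h>
#include <stdio.h>

/* same idea as the kernel macro: member pointer -> enclosing struct */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct fake_timer { unsigned long expires; };

struct applicant {
	int state;
	struct fake_timer join_timer;   /* embedded, like struct timer_list */
};

/* the callback only receives the member pointer and recovers the container */
static void join_timer_cb(struct fake_timer *t)
{
	struct applicant *app = container_of(t, struct applicant, join_timer);

	printf("state = %d\n", app->state);
}

int main(void)
{
	struct applicant app = { .state = 42 };

	join_timer_cb(&app.join_timer);   /* prints "state = 42" */
	return 0;
}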
b/net/mac80211/mlme.c @@ -3181,7 +3181,7 @@ static void ieee80211_enable_ps(struct ieee80211_local *local, return; conf->flags |= IEEE80211_CONF_PS; - ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); + ieee80211_hw_config(local, -1, IEEE80211_CONF_CHANGE_PS); } } @@ -3193,7 +3193,7 @@ static void ieee80211_change_ps(struct ieee80211_local *local) ieee80211_enable_ps(local, local->ps_sdata); } else if (conf->flags & IEEE80211_CONF_PS) { conf->flags &= ~IEEE80211_CONF_PS; - ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); + ieee80211_hw_config(local, -1, IEEE80211_CONF_CHANGE_PS); timer_delete_sync(&local->dynamic_ps_timer); wiphy_work_cancel(local->hw.wiphy, &local->dynamic_ps_enable_work); @@ -3302,7 +3302,7 @@ void ieee80211_dynamic_ps_disable_work(struct wiphy *wiphy, if (local->hw.conf.flags & IEEE80211_CONF_PS) { local->hw.conf.flags &= ~IEEE80211_CONF_PS; - ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); + ieee80211_hw_config(local, -1, IEEE80211_CONF_CHANGE_PS); } ieee80211_wake_queues_by_reason(&local->hw, @@ -3377,13 +3377,14 @@ void ieee80211_dynamic_ps_enable_work(struct wiphy *wiphy, (ifmgd->flags & IEEE80211_STA_NULLFUNC_ACKED)) { ifmgd->flags &= ~IEEE80211_STA_NULLFUNC_ACKED; local->hw.conf.flags |= IEEE80211_CONF_PS; - ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); + ieee80211_hw_config(local, -1, IEEE80211_CONF_CHANGE_PS); } } void ieee80211_dynamic_ps_timer(struct timer_list *t) { - struct ieee80211_local *local = from_timer(local, t, dynamic_ps_timer); + struct ieee80211_local *local = timer_container_of(local, t, + dynamic_ps_timer); wiphy_work_queue(local->hw.wiphy, &local->dynamic_ps_enable_work); } @@ -3985,7 +3986,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata, */ if (local->hw.conf.flags & IEEE80211_CONF_PS) { local->hw.conf.flags &= ~IEEE80211_CONF_PS; - ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); + ieee80211_hw_config(local, -1, IEEE80211_CONF_CHANGE_PS); } local->ps_sdata = NULL; @@ -5398,6 +5399,12 @@ static bool ieee80211_assoc_config_link(struct ieee80211_link_data *link, bss_conf->epcs_support = false; } + if (elems->s1g_oper && + link->u.mgd.conn.mode == IEEE80211_CONN_MODE_S1G && + elems->s1g_capab) + ieee80211_s1g_cap_to_sta_s1g_cap(sdata, elems->s1g_capab, + link_sta); + bss_conf->twt_broadcast = ieee80211_twt_bcast_support(sdata, bss_conf, sband, link_sta); @@ -7220,11 +7227,8 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link, bssid = ieee80211_get_bssid(hdr, len, sdata->vif.type); if (ieee80211_is_s1g_beacon(mgmt->frame_control)) { struct ieee80211_ext *ext = (void *) mgmt; - - if (ieee80211_is_s1g_short_beacon(ext->frame_control)) - variable = ext->u.s1g_short_beacon.variable; - else - variable = ext->u.s1g_beacon.variable; + variable = ext->u.s1g_beacon.variable + + ieee80211_s1g_optional_len(ext->frame_control); } baselen = (u8 *) variable - (u8 *) mgmt; @@ -7342,7 +7346,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link, if (local->hw.conf.dynamic_ps_timeout > 0) { if (local->hw.conf.flags & IEEE80211_CONF_PS) { local->hw.conf.flags &= ~IEEE80211_CONF_PS; - ieee80211_hw_config(local, + ieee80211_hw_config(local, -1, IEEE80211_CONF_CHANGE_PS); } ieee80211_send_nullfunc(local, sdata, false); @@ -8082,7 +8086,7 @@ void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, static void ieee80211_sta_timer(struct timer_list *t) { struct ieee80211_sub_if_data *sdata = - from_timer(sdata, t, u.mgd.timer); + timer_container_of(sdata, t, 
u.mgd.timer); wiphy_work_queue(sdata->local->hw.wiphy, &sdata->work); } @@ -8388,7 +8392,7 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata) static void ieee80211_sta_bcn_mon_timer(struct timer_list *t) { struct ieee80211_sub_if_data *sdata = - from_timer(sdata, t, u.mgd.bcn_mon_timer); + timer_container_of(sdata, t, u.mgd.bcn_mon_timer); if (WARN_ON(ieee80211_vif_is_mld(&sdata->vif))) return; @@ -8408,7 +8412,7 @@ static void ieee80211_sta_bcn_mon_timer(struct timer_list *t) static void ieee80211_sta_conn_mon_timer(struct timer_list *t) { struct ieee80211_sub_if_data *sdata = - from_timer(sdata, t, u.mgd.conn_mon_timer); + timer_container_of(sdata, t, u.mgd.conn_mon_timer); struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; struct ieee80211_local *local = sdata->local; struct sta_info *sta; diff --git a/net/mac80211/ocb.c b/net/mac80211/ocb.c index ece1e83c7b2f..a5d4358f122a 100644 --- a/net/mac80211/ocb.c +++ b/net/mac80211/ocb.c @@ -146,7 +146,7 @@ void ieee80211_ocb_work(struct ieee80211_sub_if_data *sdata) static void ieee80211_ocb_housekeeping_timer(struct timer_list *t) { struct ieee80211_sub_if_data *sdata = - from_timer(sdata, t, u.ocb.housekeeping_timer); + timer_container_of(sdata, t, u.ocb.housekeeping_timer); struct ieee80211_local *local = sdata->local; struct ieee80211_if_ocb *ifocb = &sdata->u.ocb; diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c index 2b9abc27462e..13df6321634d 100644 --- a/net/mac80211/offchannel.c +++ b/net/mac80211/offchannel.c @@ -39,7 +39,7 @@ static void ieee80211_offchannel_ps_enable(struct ieee80211_sub_if_data *sdata) if (local->hw.conf.flags & IEEE80211_CONF_PS) { offchannel_ps_enabled = true; local->hw.conf.flags &= ~IEEE80211_CONF_PS; - ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); + ieee80211_hw_config(local, -1, IEEE80211_CONF_CHANGE_PS); } if (!offchannel_ps_enabled || @@ -567,6 +567,7 @@ static int ieee80211_start_roc_work(struct ieee80211_local *local, { struct ieee80211_roc_work *roc, *tmp; bool queued = false, combine_started = true; + struct cfg80211_scan_request *req; int ret; lockdep_assert_wiphy(local->hw.wiphy); @@ -612,9 +613,11 @@ static int ieee80211_start_roc_work(struct ieee80211_local *local, roc->mgmt_tx_cookie = *cookie; } + req = wiphy_dereference(local->hw.wiphy, local->scan_req); + /* if there's no need to queue, handle it immediately */ if (list_empty(&local->roc_list) && - !local->scanning && !ieee80211_is_radar_required(local)) { + !local->scanning && !ieee80211_is_radar_required(local, req)) { /* if not HW assist, just queue & schedule work */ if (!local->ops->remain_on_channel) { list_add_tail(&roc->list, &local->roc_list); diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c index a9cc832240a5..5a508d99e84f 100644 --- a/net/mac80211/pm.c +++ b/net/mac80211/pm.c @@ -108,7 +108,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) sdata->u.mgd.powersave && !(local->hw.conf.flags & IEEE80211_CONF_PS)) { local->hw.conf.flags |= IEEE80211_CONF_PS; - ieee80211_hw_config(local, + ieee80211_hw_config(local, -1, IEEE80211_CONF_CHANGE_PS); } } diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 09beb65d6108..8699755081ad 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -231,8 +231,19 @@ static void __ieee80211_queue_skb_to_iface(struct ieee80211_sub_if_data *sdata, skb_queue_tail(&sdata->skb_queue, skb); wiphy_work_queue(sdata->local->hw.wiphy, &sdata->work); - if (sta) - sta->deflink.rx_stats.packets++; + if (sta) { + struct link_sta_info 
*link_sta_info; + + if (link_id >= 0) { + link_sta_info = rcu_dereference(sta->link[link_id]); + if (!link_sta_info) + return; + } else { + link_sta_info = &sta->deflink; + } + + link_sta_info->rx_stats.packets++; + } } static void ieee80211_queue_skb_to_iface(struct ieee80211_sub_if_data *sdata, @@ -4432,6 +4443,10 @@ static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx) if (!multicast && !ether_addr_equal(sdata->dev->dev_addr, hdr->addr1)) return false; + /* reject invalid/our STA address */ + if (!is_valid_ether_addr(hdr->addr2) || + ether_addr_equal(sdata->dev->dev_addr, hdr->addr2)) + return false; if (!rx->sta) { int rate_idx; if (status->encoding != RX_ENC_LEGACY) diff --git a/net/mac80211/s1g.c b/net/mac80211/s1g.c index d4ed0c0a335c..1f68df6e8067 100644 --- a/net/mac80211/s1g.c +++ b/net/mac80211/s1g.c @@ -194,3 +194,29 @@ void ieee80211_s1g_status_twt_action(struct ieee80211_sub_if_data *sdata, break; } } + +void ieee80211_s1g_cap_to_sta_s1g_cap(struct ieee80211_sub_if_data *sdata, + const struct ieee80211_s1g_cap *s1g_cap_ie, + struct link_sta_info *link_sta) +{ + struct ieee80211_sta_s1g_cap *s1g_cap = &link_sta->pub->s1g_cap; + + memset(s1g_cap, 0, sizeof(*s1g_cap)); + + memcpy(s1g_cap->cap, s1g_cap_ie->capab_info, sizeof(s1g_cap->cap)); + memcpy(s1g_cap->nss_mcs, s1g_cap_ie->supp_mcs_nss, + sizeof(s1g_cap->nss_mcs)); + + s1g_cap->s1g = true; + + /* Maximum MPDU length is 1 bit for S1G */ + if (s1g_cap->cap[3] & S1G_CAP3_MAX_MPDU_LEN) { + link_sta->pub->agg.max_amsdu_len = + IEEE80211_MAX_MPDU_LEN_VHT_7991; + } else { + link_sta->pub->agg.max_amsdu_len = + IEEE80211_MAX_MPDU_LEN_VHT_3895; + } + + ieee80211_sta_recalc_aggregates(&link_sta->sta->sta); +} diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c index 7b8da40a912d..9799164a56d9 100644 --- a/net/mac80211/scan.c +++ b/net/mac80211/scan.c @@ -276,6 +276,7 @@ void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb) struct ieee80211_mgmt *mgmt = (void *)skb->data; struct ieee80211_bss *bss; struct ieee80211_channel *channel; + struct ieee80211_ext *ext; size_t min_hdr_len = offsetof(struct ieee80211_mgmt, u.probe_resp.variable); @@ -285,12 +286,10 @@ void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb) return; if (ieee80211_is_s1g_beacon(mgmt->frame_control)) { - if (ieee80211_is_s1g_short_beacon(mgmt->frame_control)) - min_hdr_len = offsetof(struct ieee80211_ext, - u.s1g_short_beacon.variable); - else - min_hdr_len = offsetof(struct ieee80211_ext, - u.s1g_beacon); + ext = (struct ieee80211_ext *)mgmt; + min_hdr_len = + offsetof(struct ieee80211_ext, u.s1g_beacon.variable) + + ieee80211_s1g_optional_len(ext->frame_control); } if (skb->len < min_hdr_len) @@ -587,7 +586,8 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local, return 0; } -static bool __ieee80211_can_leave_ch(struct ieee80211_sub_if_data *sdata) +static bool __ieee80211_can_leave_ch(struct ieee80211_sub_if_data *sdata, + struct cfg80211_scan_request *req) { struct ieee80211_local *local = sdata->local; struct ieee80211_sub_if_data *sdata_iter; @@ -595,7 +595,7 @@ static bool __ieee80211_can_leave_ch(struct ieee80211_sub_if_data *sdata) lockdep_assert_wiphy(local->hw.wiphy); - if (!ieee80211_is_radar_required(local)) + if (!ieee80211_is_radar_required(local, req)) return true; if (!regulatory_pre_cac_allowed(local->hw.wiphy)) @@ -611,9 +611,10 @@ static bool __ieee80211_can_leave_ch(struct ieee80211_sub_if_data *sdata) } static bool ieee80211_can_scan(struct ieee80211_local *local, - struct 
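ieee80211_s1g_cap_to_sta_s1g_cap() above derives the peer's maximum A-MSDU length from a single S1G capability bit. An illustrative sketch of that mapping; the bit position and limits below are stand-ins for the ieee80211.h definitions:

#include <stdint.h>
#include <stdio.h>

#define MAX_MPDU_LEN_SHORT 3895        /* the two defined MPDU length limits */
#define MAX_MPDU_LEN_LONG  7991
#define CAP3_MAX_MPDU_LEN  0x01        /* assumed bit position, for illustration */

static unsigned int s1g_max_amsdu_len(const uint8_t cap[8])
{
	return (cap[3] & CAP3_MAX_MPDU_LEN) ? MAX_MPDU_LEN_LONG
					    : MAX_MPDU_LEN_SHORT;
}

int main(void)
{
	uint8_t caps[8] = { 0 };

	printf("%u\n", s1g_max_amsdu_len(caps));   /* 3895 */
	caps[3] |= CAP3_MAX_MPDU_LEN;
	printf("%u\n", s1g_max_amsdu_len(caps));   /* 7991 */
	return 0;
}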
ieee80211_sub_if_data *sdata) + struct ieee80211_sub_if_data *sdata, + struct cfg80211_scan_request *req) { - if (!__ieee80211_can_leave_ch(sdata)) + if (!__ieee80211_can_leave_ch(sdata, req)) return false; if (!list_empty(&local->roc_list)) @@ -628,15 +629,19 @@ static bool ieee80211_can_scan(struct ieee80211_local *local, void ieee80211_run_deferred_scan(struct ieee80211_local *local) { + struct cfg80211_scan_request *req; + lockdep_assert_wiphy(local->hw.wiphy); if (!local->scan_req || local->scanning) return; + req = wiphy_dereference(local->hw.wiphy, local->scan_req); if (!ieee80211_can_scan(local, rcu_dereference_protected( local->scan_sdata, - lockdep_is_held(&local->hw.wiphy->mtx)))) + lockdep_is_held(&local->hw.wiphy->mtx)), + req)) return; wiphy_delayed_work_queue(local->hw.wiphy, &local->scan_work, @@ -733,10 +738,10 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata, !(sdata->vif.active_links & BIT(req->tsf_report_link_id))) return -EINVAL; - if (!__ieee80211_can_leave_ch(sdata)) + if (!__ieee80211_can_leave_ch(sdata, req)) return -EBUSY; - if (!ieee80211_can_scan(local, sdata)) { + if (!ieee80211_can_scan(local, sdata, req)) { /* wait for the work to finish/time out */ rcu_assign_pointer(local->scan_req, req); rcu_assign_pointer(local->scan_sdata, sdata); diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index 84b18be1f0b1..89cf365b07e6 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -355,6 +355,50 @@ static void sta_info_free_link(struct link_sta_info *link_sta) free_percpu(link_sta->pcpu_rx_stats); } +static void sta_accumulate_removed_link_stats(struct sta_info *sta, int link_id) +{ + struct link_sta_info *link_sta = wiphy_dereference(sta->local->hw.wiphy, + sta->link[link_id]); + struct ieee80211_link_data *link; + int ac, tid; + u32 thr; + + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { + sta->rem_link_stats.tx_packets += + link_sta->tx_stats.packets[ac]; + sta->rem_link_stats.tx_bytes += link_sta->tx_stats.bytes[ac]; + } + + sta->rem_link_stats.rx_packets += link_sta->rx_stats.packets; + sta->rem_link_stats.rx_bytes += link_sta->rx_stats.bytes; + sta->rem_link_stats.tx_retries += link_sta->status_stats.retry_count; + sta->rem_link_stats.tx_failed += link_sta->status_stats.retry_failed; + sta->rem_link_stats.rx_dropped_misc += link_sta->rx_stats.dropped; + + thr = sta_get_expected_throughput(sta); + if (thr != 0) + sta->rem_link_stats.expected_throughput += thr; + + for (tid = 0; tid < IEEE80211_NUM_TIDS; tid++) { + sta->rem_link_stats.pertid_stats.rx_msdu += + link_sta->rx_stats.msdu[tid]; + sta->rem_link_stats.pertid_stats.tx_msdu += + link_sta->tx_stats.msdu[tid]; + sta->rem_link_stats.pertid_stats.tx_msdu_retries += + link_sta->status_stats.msdu_retries[tid]; + sta->rem_link_stats.pertid_stats.tx_msdu_failed += + link_sta->status_stats.msdu_failed[tid]; + } + + if (sta->sdata->vif.type == NL80211_IFTYPE_STATION) { + link = wiphy_dereference(sta->sdata->local->hw.wiphy, + sta->sdata->link[link_id]); + if (link) + sta->rem_link_stats.beacon_loss_count += + link->u.mgd.beacon_loss_count; + } +} + static void sta_remove_link(struct sta_info *sta, unsigned int link_id, bool unhash) { @@ -377,6 +421,10 @@ static void sta_remove_link(struct sta_info *sta, unsigned int link_id, alloc = container_of(link_sta, typeof(*alloc), info); sta->sta.valid_links &= ~BIT(link_id); + + /* store removed link info for accumulated stats consistency */ + sta_accumulate_removed_link_stats(sta, link_id); + 
RCU_INIT_POINTER(sta->link[link_id], NULL); RCU_INIT_POINTER(sta->sta.link[link_id], NULL); if (alloc) { @@ -1542,7 +1590,8 @@ int sta_info_destroy_addr_bss(struct ieee80211_sub_if_data *sdata, static void sta_info_cleanup(struct timer_list *t) { - struct ieee80211_local *local = from_timer(local, t, sta_cleanup); + struct ieee80211_local *local = timer_container_of(local, t, + sta_cleanup); struct sta_info *sta; bool timer_needed = false; @@ -1650,7 +1699,7 @@ void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata, lockdep_assert_wiphy(local->hw.wiphy); list_for_each_entry_safe(sta, tmp, &local->sta_list, list) { - unsigned long last_active = ieee80211_sta_last_active(sta); + unsigned long last_active = ieee80211_sta_last_active(sta, -1); if (sdata != sta->sdata) continue; @@ -2419,18 +2468,27 @@ void ieee80211_sta_update_pending_airtime(struct ieee80211_local *local, } static struct ieee80211_sta_rx_stats * -sta_get_last_rx_stats(struct sta_info *sta) +sta_get_last_rx_stats(struct sta_info *sta, int link_id) { - struct ieee80211_sta_rx_stats *stats = &sta->deflink.rx_stats; + struct ieee80211_sta_rx_stats *stats; + struct link_sta_info *link_sta_info; int cpu; - if (!sta->deflink.pcpu_rx_stats) + if (link_id < 0) + link_sta_info = &sta->deflink; + else + link_sta_info = wiphy_dereference(sta->local->hw.wiphy, + sta->link[link_id]); + + stats = &link_sta_info->rx_stats; + + if (!link_sta_info->pcpu_rx_stats) return stats; for_each_possible_cpu(cpu) { struct ieee80211_sta_rx_stats *cpustats; - cpustats = per_cpu_ptr(sta->deflink.pcpu_rx_stats, cpu); + cpustats = per_cpu_ptr(link_sta_info->pcpu_rx_stats, cpu); if (time_after(cpustats->last_rx, stats->last_rx)) stats = cpustats; @@ -2498,9 +2556,10 @@ static void sta_stats_decode_rate(struct ieee80211_local *local, u32 rate, } } -static int sta_set_rate_info_rx(struct sta_info *sta, struct rate_info *rinfo) +static int sta_set_rate_info_rx(struct sta_info *sta, struct rate_info *rinfo, + int link_id) { - u32 rate = READ_ONCE(sta_get_last_rx_stats(sta)->last_rate); + u32 rate = READ_ONCE(sta_get_last_rx_stats(sta, link_id)->last_rate); if (rate == STA_STATS_RATE_INVALID) return -EINVAL; @@ -2525,20 +2584,28 @@ static inline u64 sta_get_tidstats_msdu(struct ieee80211_sta_rx_stats *rxstats, static void sta_set_tidstats(struct sta_info *sta, struct cfg80211_tid_stats *tidstats, - int tid) + int tid, int link_id) { struct ieee80211_local *local = sta->local; + struct link_sta_info *link_sta_info; int cpu; + if (link_id < 0) + link_sta_info = &sta->deflink; + else + link_sta_info = wiphy_dereference(sta->local->hw.wiphy, + sta->link[link_id]); + if (!(tidstats->filled & BIT(NL80211_TID_STATS_RX_MSDU))) { - tidstats->rx_msdu += sta_get_tidstats_msdu(&sta->deflink.rx_stats, - tid); + tidstats->rx_msdu += + sta_get_tidstats_msdu(&link_sta_info->rx_stats, + tid); - if (sta->deflink.pcpu_rx_stats) { + if (link_sta_info->pcpu_rx_stats) { for_each_possible_cpu(cpu) { struct ieee80211_sta_rx_stats *cpurxs; - cpurxs = per_cpu_ptr(sta->deflink.pcpu_rx_stats, + cpurxs = per_cpu_ptr(link_sta_info->pcpu_rx_stats, cpu); tidstats->rx_msdu += sta_get_tidstats_msdu(cpurxs, tid); @@ -2550,22 +2617,24 @@ static void sta_set_tidstats(struct sta_info *sta, if (!(tidstats->filled & BIT(NL80211_TID_STATS_TX_MSDU))) { tidstats->filled |= BIT(NL80211_TID_STATS_TX_MSDU); - tidstats->tx_msdu = sta->deflink.tx_stats.msdu[tid]; + tidstats->tx_msdu = link_sta_info->tx_stats.msdu[tid]; } if (!(tidstats->filled & BIT(NL80211_TID_STATS_TX_MSDU_RETRIES)) && 
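sta_get_last_rx_stats() now walks the chosen link's per-CPU statistics and returns whichever snapshot carries the most recent last_rx. A simplified userspace version of that "pick the freshest copy" scan, using a plain counter where the kernel compares jiffies with time_after():

#include <stdio.h>

struct rx_stats {
	unsigned long last_rx;     /* stand-in for a jiffies timestamp */
	unsigned long packets;
};

/* return the per-CPU snapshot with the newest last_rx, defaulting to base */
static const struct rx_stats *freshest(const struct rx_stats *base,
				       const struct rx_stats *percpu, int ncpu)
{
	const struct rx_stats *best = base;

	for (int cpu = 0; cpu < ncpu; cpu++)
		if (percpu[cpu].last_rx > best->last_rx)
			best = &percpu[cpu];
	return best;
}

int main(void)
{
	struct rx_stats base = { .last_rx = 100, .packets = 10 };
	struct rx_stats percpu[2] = {
		{ .last_rx = 90,  .packets = 4 },
		{ .last_rx = 120, .packets = 7 },
	};

	printf("packets=%lu\n", freshest(&base, percpu, 2)->packets); /* 7 */
	return 0;
}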
ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) { tidstats->filled |= BIT(NL80211_TID_STATS_TX_MSDU_RETRIES); - tidstats->tx_msdu_retries = sta->deflink.status_stats.msdu_retries[tid]; + tidstats->tx_msdu_retries = + link_sta_info->status_stats.msdu_retries[tid]; } if (!(tidstats->filled & BIT(NL80211_TID_STATS_TX_MSDU_FAILED)) && ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) { tidstats->filled |= BIT(NL80211_TID_STATS_TX_MSDU_FAILED); - tidstats->tx_msdu_failed = sta->deflink.status_stats.msdu_failed[tid]; + tidstats->tx_msdu_failed = + link_sta_info->status_stats.msdu_failed[tid]; } - if (tid < IEEE80211_NUM_TIDS) { + if (link_id < 0 && tid < IEEE80211_NUM_TIDS) { spin_lock_bh(&local->fq.lock); rcu_read_lock(); @@ -2624,16 +2693,278 @@ static void sta_set_mesh_sinfo(struct sta_info *sta, } #endif +void sta_set_accumulated_removed_links_sinfo(struct sta_info *sta, + struct station_info *sinfo) +{ + /* Accumulating the removed link statistics. */ + sinfo->tx_packets = sta->rem_link_stats.tx_packets; + sinfo->rx_packets = sta->rem_link_stats.rx_packets; + sinfo->tx_bytes = sta->rem_link_stats.tx_bytes; + sinfo->rx_bytes = sta->rem_link_stats.rx_bytes; + sinfo->tx_retries = sta->rem_link_stats.tx_retries; + sinfo->tx_failed = sta->rem_link_stats.tx_failed; + sinfo->rx_dropped_misc = sta->rem_link_stats.rx_dropped_misc; + sinfo->beacon_loss_count = sta->rem_link_stats.beacon_loss_count; + sinfo->expected_throughput = sta->rem_link_stats.expected_throughput; + + if (sinfo->pertid) { + sinfo->pertid->rx_msdu = + sta->rem_link_stats.pertid_stats.rx_msdu; + sinfo->pertid->tx_msdu = + sta->rem_link_stats.pertid_stats.tx_msdu; + sinfo->pertid->tx_msdu_retries = + sta->rem_link_stats.pertid_stats.tx_msdu_retries; + sinfo->pertid->tx_msdu_failed = + sta->rem_link_stats.pertid_stats.tx_msdu_failed; + } +} + +static void sta_set_link_sinfo(struct sta_info *sta, + struct link_station_info *link_sinfo, + struct ieee80211_link_data *link, + bool tidstats) +{ + struct ieee80211_sub_if_data *sdata = sta->sdata; + struct ieee80211_sta_rx_stats *last_rxstats; + int i, ac, cpu, link_id = link->link_id; + struct link_sta_info *link_sta_info; + u32 thr = 0; + + last_rxstats = sta_get_last_rx_stats(sta, link_id); + + link_sta_info = wiphy_dereference(sta->local->hw.wiphy, + sta->link[link_id]); + + /* do before driver, so beacon filtering drivers have a + * chance to e.g. 
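sta_accumulate_removed_link_stats() above keeps MLO counters from disappearing when a link is torn down: the departing link's totals are folded into sta->rem_link_stats before the link_sta is freed, and sta_set_accumulated_removed_links_sinfo() later reports them. A toy version of that fold, with heavily simplified structs:

#include <stdio.h>

#define NUM_ACS 4

struct link_counters {
	unsigned long tx_packets[NUM_ACS];  /* per access category, like the kernel */
	unsigned long rx_packets;
};

struct removed_totals {
	unsigned long tx_packets;
	unsigned long rx_packets;
};

/* fold a departing link's counters into the accumulator before freeing it */
static void accumulate_removed_link(struct removed_totals *tot,
				    const struct link_counters *link)
{
	for (int ac = 0; ac < NUM_ACS; ac++)
		tot->tx_packets += link->tx_packets[ac];
	tot->rx_packets += link->rx_packets;
}

int main(void)
{
	struct removed_totals tot = { 0 };
	struct link_counters link = {
		.tx_packets = { 1, 2, 3, 4 },
		.rx_packets = 9,
	};

	accumulate_removed_link(&tot, &link);
	printf("tx=%lu rx=%lu\n", tot.tx_packets, tot.rx_packets); /* tx=10 rx=9 */
	return 0;
}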
just add the number of filtered beacons + * (or just modify the value entirely, of course) + */ + if (sdata->vif.type == NL80211_IFTYPE_STATION) + link_sinfo->rx_beacon = link->u.mgd.count_beacon_signal; + + ether_addr_copy(link_sinfo->addr, link_sta_info->addr); + + drv_link_sta_statistics(sta->local, sdata, + link_sta_info->pub, + link_sinfo); + + link_sinfo->filled |= BIT_ULL(NL80211_STA_INFO_INACTIVE_TIME) | + BIT_ULL(NL80211_STA_INFO_BSS_PARAM) | + BIT_ULL(NL80211_STA_INFO_RX_DROP_MISC); + + if (sdata->vif.type == NL80211_IFTYPE_STATION) { + link_sinfo->beacon_loss_count = + link->u.mgd.beacon_loss_count; + link_sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_LOSS); + } + + link_sinfo->inactive_time = + jiffies_to_msecs(jiffies - ieee80211_sta_last_active(sta, link_id)); + + if (!(link_sinfo->filled & (BIT_ULL(NL80211_STA_INFO_TX_BYTES64) | + BIT_ULL(NL80211_STA_INFO_TX_BYTES)))) { + link_sinfo->tx_bytes = 0; + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) + link_sinfo->tx_bytes += + link_sta_info->tx_stats.bytes[ac]; + link_sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BYTES64); + } + + if (!(link_sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_PACKETS))) { + link_sinfo->tx_packets = 0; + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) + link_sinfo->tx_packets += + link_sta_info->tx_stats.packets[ac]; + link_sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_PACKETS); + } + + if (!(link_sinfo->filled & (BIT_ULL(NL80211_STA_INFO_RX_BYTES64) | + BIT_ULL(NL80211_STA_INFO_RX_BYTES)))) { + link_sinfo->rx_bytes += + sta_get_stats_bytes(&link_sta_info->rx_stats); + + if (link_sta_info->pcpu_rx_stats) { + for_each_possible_cpu(cpu) { + struct ieee80211_sta_rx_stats *cpurxs; + + cpurxs = per_cpu_ptr(link_sta_info->pcpu_rx_stats, + cpu); + link_sinfo->rx_bytes += + sta_get_stats_bytes(cpurxs); + } + } + + link_sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BYTES64); + } + + if (!(link_sinfo->filled & BIT_ULL(NL80211_STA_INFO_RX_PACKETS))) { + link_sinfo->rx_packets = link_sta_info->rx_stats.packets; + if (link_sta_info->pcpu_rx_stats) { + for_each_possible_cpu(cpu) { + struct ieee80211_sta_rx_stats *cpurxs; + + cpurxs = per_cpu_ptr(link_sta_info->pcpu_rx_stats, + cpu); + link_sinfo->rx_packets += cpurxs->packets; + } + } + link_sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_PACKETS); + } + + if (!(link_sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_RETRIES))) { + link_sinfo->tx_retries = + link_sta_info->status_stats.retry_count; + link_sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_RETRIES); + } + + if (!(link_sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_FAILED))) { + link_sinfo->tx_failed = + link_sta_info->status_stats.retry_failed; + link_sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_FAILED); + } + + if (!(link_sinfo->filled & BIT_ULL(NL80211_STA_INFO_RX_DURATION))) { + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) + link_sinfo->rx_duration += sta->airtime[ac].rx_airtime; + link_sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_DURATION); + } + + if (!(link_sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_DURATION))) { + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) + link_sinfo->tx_duration += sta->airtime[ac].tx_airtime; + link_sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_DURATION); + } + + if (!(link_sinfo->filled & BIT_ULL(NL80211_STA_INFO_AIRTIME_WEIGHT))) { + link_sinfo->airtime_weight = sta->airtime_weight; + link_sinfo->filled |= BIT_ULL(NL80211_STA_INFO_AIRTIME_WEIGHT); + } + + link_sinfo->rx_dropped_misc = link_sta_info->rx_stats.dropped; + if (link_sta_info->pcpu_rx_stats) { + for_each_possible_cpu(cpu) { + struct 
ieee80211_sta_rx_stats *cpurxs; + + cpurxs = per_cpu_ptr(link_sta_info->pcpu_rx_stats, + cpu); + link_sinfo->rx_dropped_misc += cpurxs->dropped; + } + } + + if (sdata->vif.type == NL80211_IFTYPE_STATION && + !(sdata->vif.driver_flags & IEEE80211_VIF_BEACON_FILTER)) { + link_sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_RX) | + BIT_ULL(NL80211_STA_INFO_BEACON_SIGNAL_AVG); + link_sinfo->rx_beacon_signal_avg = + ieee80211_ave_rssi(&sdata->vif, -1); + } + + if (ieee80211_hw_check(&sta->local->hw, SIGNAL_DBM) || + ieee80211_hw_check(&sta->local->hw, SIGNAL_UNSPEC)) { + if (!(link_sinfo->filled & BIT_ULL(NL80211_STA_INFO_SIGNAL))) { + link_sinfo->signal = (s8)last_rxstats->last_signal; + link_sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL); + } + + if (!link_sta_info->pcpu_rx_stats && + !(link_sinfo->filled & + BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG))) { + link_sinfo->signal_avg = + -ewma_signal_read(&link_sta_info->rx_stats_avg.signal); + link_sinfo->filled |= + BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG); + } + } + + /* for the average - if pcpu_rx_stats isn't set - rxstats must point to + * the sta->rx_stats struct, so the check here is fine with and without + * pcpu statistics + */ + if (last_rxstats->chains && + !(link_sinfo->filled & (BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL) | + BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL_AVG)))) { + link_sinfo->filled |= BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL); + if (!link_sta_info->pcpu_rx_stats) + link_sinfo->filled |= + BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL_AVG); + + link_sinfo->chains = last_rxstats->chains; + + for (i = 0; i < ARRAY_SIZE(link_sinfo->chain_signal); i++) { + link_sinfo->chain_signal[i] = + last_rxstats->chain_signal_last[i]; + link_sinfo->chain_signal_avg[i] = + -ewma_signal_read( + &link_sta_info->rx_stats_avg.chain_signal[i]); + } + } + + if (!(link_sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_BITRATE)) && + ieee80211_rate_valid(&link_sta_info->tx_stats.last_rate)) { + sta_set_rate_info_tx(sta, &link_sta_info->tx_stats.last_rate, + &link_sinfo->txrate); + link_sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE); + } + + if (!(link_sinfo->filled & BIT_ULL(NL80211_STA_INFO_RX_BITRATE))) { + if (sta_set_rate_info_rx(sta, &link_sinfo->rxrate, + link_id) == 0) + link_sinfo->filled |= + BIT_ULL(NL80211_STA_INFO_RX_BITRATE); + } + + if (tidstats && !cfg80211_link_sinfo_alloc_tid_stats(link_sinfo, + GFP_KERNEL)) { + for (i = 0; i < IEEE80211_NUM_TIDS + 1; i++) + sta_set_tidstats(sta, &link_sinfo->pertid[i], i, + link_id); + } + + link_sinfo->bss_param.flags = 0; + if (sdata->vif.bss_conf.use_cts_prot) + link_sinfo->bss_param.flags |= BSS_PARAM_FLAGS_CTS_PROT; + if (sdata->vif.bss_conf.use_short_preamble) + link_sinfo->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_PREAMBLE; + if (sdata->vif.bss_conf.use_short_slot) + link_sinfo->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_SLOT_TIME; + link_sinfo->bss_param.dtim_period = link->conf->dtim_period; + link_sinfo->bss_param.beacon_interval = link->conf->beacon_int; + + thr = sta_get_expected_throughput(sta); + + if (thr != 0) { + link_sinfo->filled |= + BIT_ULL(NL80211_STA_INFO_EXPECTED_THROUGHPUT); + link_sinfo->expected_throughput = thr; + } + + if (!(link_sinfo->filled & BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL)) && + link_sta_info->status_stats.ack_signal_filled) { + link_sinfo->ack_signal = + link_sta_info->status_stats.last_ack_signal; + link_sinfo->filled |= BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL); + } + + if (!(link_sinfo->filled & BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL_AVG)) && + 
link_sta_info->status_stats.ack_signal_filled) { + link_sinfo->avg_ack_signal = + -(s8)ewma_avg_signal_read( + &link_sta_info->status_stats.avg_ack_signal); + link_sinfo->filled |= + BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL_AVG); + } +} + void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo, bool tidstats) { struct ieee80211_sub_if_data *sdata = sta->sdata; struct ieee80211_local *local = sdata->local; u32 thr = 0; - int i, ac, cpu; + int i, ac, cpu, link_id; struct ieee80211_sta_rx_stats *last_rxstats; - last_rxstats = sta_get_last_rx_stats(sta); + last_rxstats = sta_get_last_rx_stats(sta, -1); sinfo->generation = sdata->local->sta_generation; @@ -2661,7 +2992,7 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo, sinfo->connected_time = ktime_get_seconds() - sta->last_connected; sinfo->assoc_at = sta->assoc_at; sinfo->inactive_time = - jiffies_to_msecs(jiffies - ieee80211_sta_last_active(sta)); + jiffies_to_msecs(jiffies - ieee80211_sta_last_active(sta, -1)); if (!(sinfo->filled & (BIT_ULL(NL80211_STA_INFO_TX_BYTES64) | BIT_ULL(NL80211_STA_INFO_TX_BYTES)))) { @@ -2750,7 +3081,8 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo, !(sdata->vif.driver_flags & IEEE80211_VIF_BEACON_FILTER)) { sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_RX) | BIT_ULL(NL80211_STA_INFO_BEACON_SIGNAL_AVG); - sinfo->rx_beacon_signal_avg = ieee80211_ave_rssi(&sdata->vif); + sinfo->rx_beacon_signal_avg = + ieee80211_ave_rssi(&sdata->vif, -1); } if (ieee80211_hw_check(&sta->local->hw, SIGNAL_DBM) || @@ -2799,13 +3131,13 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo, if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_RX_BITRATE)) && !sta->sta.valid_links) { - if (sta_set_rate_info_rx(sta, &sinfo->rxrate) == 0) + if (sta_set_rate_info_rx(sta, &sinfo->rxrate, -1) == 0) sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BITRATE); } if (tidstats && !cfg80211_sinfo_alloc_tid_stats(sinfo, GFP_KERNEL)) { for (i = 0; i < IEEE80211_NUM_TIDS + 1; i++) - sta_set_tidstats(sta, &sinfo->pertid[i], i); + sta_set_tidstats(sta, &sinfo->pertid[i], i, -1); } #ifdef CONFIG_MAC80211_MESH @@ -2867,6 +3199,26 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo, sinfo->filled |= BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL_AVG); } + + if (sta->sta.valid_links) { + struct ieee80211_link_data *link; + struct link_sta_info *link_sta; + + ether_addr_copy(sinfo->mld_addr, sta->addr); + for_each_valid_link(sinfo, link_id) { + link_sta = wiphy_dereference(sta->local->hw.wiphy, + sta->link[link_id]); + link = wiphy_dereference(sdata->local->hw.wiphy, + sdata->link[link_id]); + + if (!link_sta || !sinfo->links[link_id] || !link) + continue; + + sinfo->valid_links = sta->sta.valid_links; + sta_set_link_sinfo(sta, sinfo->links[link_id], + link, tidstats); + } + } } u32 sta_get_expected_throughput(struct sta_info *sta) @@ -2888,14 +3240,24 @@ u32 sta_get_expected_throughput(struct sta_info *sta) return thr; } -unsigned long ieee80211_sta_last_active(struct sta_info *sta) +unsigned long ieee80211_sta_last_active(struct sta_info *sta, int link_id) { - struct ieee80211_sta_rx_stats *stats = sta_get_last_rx_stats(sta); + struct ieee80211_sta_rx_stats *stats; + struct link_sta_info *link_sta_info; - if (!sta->deflink.status_stats.last_ack || - time_after(stats->last_rx, sta->deflink.status_stats.last_ack)) + stats = sta_get_last_rx_stats(sta, link_id); + + if (link_id < 0) + link_sta_info = &sta->deflink; + else + link_sta_info = wiphy_dereference(sta->local->hw.wiphy, + 
sta->link[link_id]); + + if (!link_sta_info->status_stats.last_ack || + time_after(stats->last_rx, link_sta_info->status_stats.last_ack)) return stats->last_rx; - return sta->deflink.status_stats.last_ack; + + return link_sta_info->status_stats.last_ack; } int ieee80211_sta_allocate_link(struct sta_info *sta, unsigned int link_id) diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h index 7a95d8d34fca..5288d5286651 100644 --- a/net/mac80211/sta_info.h +++ b/net/mac80211/sta_info.h @@ -569,6 +569,58 @@ struct link_sta_info { }; /** + * struct ieee80211_sta_removed_link_stats - Removed link sta data + * + * keep required accumulated removed link data for stats + * + * @rx_packets: accumulated packets (MSDUs & MMPDUs) received from + * this station for removed links + * @tx_packets: accumulated packets (MSDUs & MMPDUs) transmitted to + * this station for removed links + * @rx_bytes: accumulated bytes (size of MPDUs) received from this + * station for removed links + * @tx_bytes: accumulated bytes (size of MPDUs) transmitted to this + * station for removed links + * @tx_retries: cumulative retry counts (MPDUs) for removed links + * @tx_failed: accumulated number of failed transmissions (MPDUs) + * (retries exceeded, no ACK) for removed links + * @rx_dropped_misc: accumulated dropped packets for un-specified reason + * from this station for removed links + * @beacon_loss_count: Number of times beacon loss event has triggered + * from this station for removed links. + * @expected_throughput: expected throughput in kbps (including 802.11 + * headers) towards this station for removed links + * @pertid_stats: accumulated per-TID statistics for removed link of + * station + * @pertid_stats.rx_msdu : accumulated number of received MSDUs towards + * this station for removed links. + * @pertid_stats.tx_msdu: accumulated number of (attempted) transmitted + * MSDUs towards this station for removed links + * @pertid_stats.tx_msdu_retries: accumulated number of retries (not + * counting the first) for transmitted MSDUs towards this station + * for removed links + * @pertid_stats.tx_msdu_failed: accumulated number of failed transmitted + * MSDUs towards this station for removed links + */ +struct ieee80211_sta_removed_link_stats { + u32 rx_packets; + u32 tx_packets; + u64 rx_bytes; + u64 tx_bytes; + u32 tx_retries; + u32 tx_failed; + u32 rx_dropped_misc; + u32 beacon_loss_count; + u32 expected_throughput; + struct { + u64 rx_msdu; + u64 tx_msdu; + u64 tx_msdu_retries; + u64 tx_msdu_failed; + } pertid_stats; +}; + +/** * struct sta_info - STA information * * This structure collects information about a station that @@ -644,6 +696,7 @@ struct link_sta_info { * @deflink address and remaining would be allocated and the address * would be assigned to link[link_id] where link_id is the id assigned * by the AP. + * @rem_link_stats: accumulated removed link stats */ struct sta_info { /* General information, mostly static */ @@ -718,6 +771,7 @@ struct sta_info { struct ieee80211_sta_aggregates cur; struct link_sta_info deflink; struct link_sta_info __rcu *link[IEEE80211_MLD_MAX_NUM_LINKS]; + struct ieee80211_sta_removed_link_stats rem_link_stats; /* keep last! 
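ieee80211_sta_last_active() now takes a link_id and reports whichever of the link's last RX time or last ACK time is newer. A compact illustration of that choice, with plain integers standing in for jiffies/time_after():

#include <stdio.h>

/* newest of "last frame received" and "last ACK seen"; 0 means "never" */
static unsigned long last_active(unsigned long last_rx, unsigned long last_ack)
{
	if (!last_ack || last_rx > last_ack)
		return last_rx;
	return last_ack;
}

int main(void)
{
	printf("%lu\n", last_active(1000, 0));    /* 1000: no ACK recorded yet */
	printf("%lu\n", last_active(1000, 1500)); /* 1500: the ACK is newer */
	return 0;
}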
*/ struct ieee80211_sta sta; @@ -922,6 +976,9 @@ void sta_set_rate_info_tx(struct sta_info *sta, void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo, bool tidstats); +void sta_set_accumulated_removed_links_sinfo(struct sta_info *sta, + struct station_info *sinfo); + u32 sta_get_expected_throughput(struct sta_info *sta); void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata, @@ -936,7 +993,7 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta); void ieee80211_sta_ps_deliver_poll_response(struct sta_info *sta); void ieee80211_sta_ps_deliver_uapsd(struct sta_info *sta); -unsigned long ieee80211_sta_last_active(struct sta_info *sta); +unsigned long ieee80211_sta_last_active(struct sta_info *sta, int link_id); void ieee80211_sta_set_max_amsdu_subframes(struct sta_info *sta, const u8 *ext_capab, diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h index 72fad8ea8bb9..0bfbce157486 100644 --- a/net/mac80211/trace.h +++ b/net/mac80211/trace.h @@ -384,12 +384,14 @@ DEFINE_EVENT(local_sdata_addr_evt, drv_remove_interface, TRACE_EVENT(drv_config, TP_PROTO(struct ieee80211_local *local, + int radio_idx, u32 changed), - TP_ARGS(local, changed), + TP_ARGS(local, radio_idx, changed), TP_STRUCT__entry( LOCAL_ENTRY + __field(int, radio_idx) __field(u32, changed) __field(u32, flags) __field(int, power_level) @@ -403,6 +405,7 @@ TRACE_EVENT(drv_config, TP_fast_assign( LOCAL_ASSIGN; + __entry->radio_idx = radio_idx; __entry->changed = changed; __entry->flags = local->hw.conf.flags; __entry->power_level = local->hw.conf.power_level; @@ -417,8 +420,8 @@ TRACE_EVENT(drv_config, ), TP_printk( - LOCAL_PR_FMT " ch:%#x" CHANDEF_PR_FMT, - LOCAL_PR_ARG, __entry->changed, CHANDEF_PR_ARG + LOCAL_PR_FMT " radio_idx:%d ch:%#x" CHANDEF_PR_FMT, + LOCAL_PR_ARG, __entry->radio_idx, __entry->changed, CHANDEF_PR_ARG ) ); @@ -818,34 +821,71 @@ TRACE_EVENT(drv_get_key_seq, ) ); -DEFINE_EVENT(local_u32_evt, drv_set_frag_threshold, - TP_PROTO(struct ieee80211_local *local, u32 value), - TP_ARGS(local, value) +TRACE_EVENT(drv_set_frag_threshold, + TP_PROTO(struct ieee80211_local *local, int radio_idx, u32 value), + + TP_ARGS(local, radio_idx, value), + + TP_STRUCT__entry( + LOCAL_ENTRY + __field(int, radio_idx) + __field(u32, value) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + __entry->radio_idx = radio_idx; + __entry->value = value; + ), + + TP_printk( + LOCAL_PR_FMT " radio_id:%d value:%u", + LOCAL_PR_ARG, __entry->radio_idx, __entry->value + ) ); -DEFINE_EVENT(local_u32_evt, drv_set_rts_threshold, - TP_PROTO(struct ieee80211_local *local, u32 value), - TP_ARGS(local, value) +TRACE_EVENT(drv_set_rts_threshold, + TP_PROTO(struct ieee80211_local *local, int radio_idx, u32 value), + + TP_ARGS(local, radio_idx, value), + + TP_STRUCT__entry( + LOCAL_ENTRY + __field(int, radio_idx) + __field(u32, value) + ), + TP_fast_assign( + LOCAL_ASSIGN; + __entry->radio_idx = radio_idx; + __entry->value = value; + ), + + TP_printk( + LOCAL_PR_FMT " radio_id:%d value:%u", + LOCAL_PR_ARG, __entry->radio_idx, __entry->value + ) ); TRACE_EVENT(drv_set_coverage_class, - TP_PROTO(struct ieee80211_local *local, s16 value), + TP_PROTO(struct ieee80211_local *local, int radio_idx, s16 value), - TP_ARGS(local, value), + TP_ARGS(local, radio_idx, value), TP_STRUCT__entry( LOCAL_ENTRY + __field(int, radio_idx) __field(s16, value) ), TP_fast_assign( LOCAL_ASSIGN; + __entry->radio_idx = radio_idx; __entry->value = value; ), TP_printk( - LOCAL_PR_FMT " value:%d", - LOCAL_PR_ARG, __entry->value + LOCAL_PR_FMT " radio_id:%d 
value:%d", + LOCAL_PR_ARG, __entry->radio_idx, __entry->value ) ); @@ -1002,6 +1042,33 @@ DEFINE_EVENT(sta_event, drv_sta_statistics, TP_ARGS(local, sdata, sta) ); +TRACE_EVENT(drv_link_sta_statistics, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_link_sta *link_sta), + + TP_ARGS(local, sdata, link_sta), + + TP_STRUCT__entry( + LOCAL_ENTRY + VIF_ENTRY + STA_ENTRY + __field(u32, link_id) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + VIF_ASSIGN; + STA_NAMED_ASSIGN(link_sta->sta); + __entry->link_id = link_sta->link_id; + ), + + TP_printk( + LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " (link %d)", + LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->link_id + ) +); + DEFINE_EVENT(sta_event, drv_sta_add, TP_PROTO(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, @@ -1291,12 +1358,14 @@ TRACE_EVENT(drv_set_antenna, ); TRACE_EVENT(drv_get_antenna, - TP_PROTO(struct ieee80211_local *local, u32 tx_ant, u32 rx_ant, int ret), + TP_PROTO(struct ieee80211_local *local, int radio_idx, u32 tx_ant, + u32 rx_ant, int ret), - TP_ARGS(local, tx_ant, rx_ant, ret), + TP_ARGS(local, radio_idx, tx_ant, rx_ant, ret), TP_STRUCT__entry( LOCAL_ENTRY + __field(int, radio_idx) __field(u32, tx_ant) __field(u32, rx_ant) __field(int, ret) @@ -1304,14 +1373,16 @@ TRACE_EVENT(drv_get_antenna, TP_fast_assign( LOCAL_ASSIGN; + __entry->radio_idx = radio_idx; __entry->tx_ant = tx_ant; __entry->rx_ant = rx_ant; __entry->ret = ret; ), TP_printk( - LOCAL_PR_FMT " tx_ant:%d rx_ant:%d ret:%d", - LOCAL_PR_ARG, __entry->tx_ant, __entry->rx_ant, __entry->ret + LOCAL_PR_FMT " radio_idx:%d tx_ant:%d rx_ant:%d ret:%d", + LOCAL_PR_ARG, __entry->radio_idx, __entry->tx_ant, + __entry->rx_ant, __entry->ret ) ); diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index d8d4f3d7d7f2..6fa883a9250d 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -5,7 +5,7 @@ * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> * Copyright 2007 Johannes Berg <johannes@sipsolutions.net> * Copyright 2013-2014 Intel Mobile Communications GmbH - * Copyright (C) 2018-2024 Intel Corporation + * Copyright (C) 2018-2025 Intel Corporation * * Transmit and frame generation functions. 
*/ @@ -1173,7 +1173,8 @@ void ieee80211_aggr_check(struct ieee80211_sub_if_data *sdata, return; if (!sta || - (!sta->sta.valid_links && !sta->sta.deflink.ht_cap.ht_supported) || + (!sta->sta.valid_links && !sta->sta.deflink.ht_cap.ht_supported && + !sta->sta.deflink.s1g_cap.s1g) || !sta->sta.wme || skb_get_queue_mapping(skb) == IEEE80211_AC_VO || skb->protocol == sdata->control_port_protocol) return; @@ -1541,7 +1542,7 @@ void ieee80211_txq_purge(struct ieee80211_local *local, spin_unlock_bh(&local->active_txq_lock[txqi->txq.ac]); } -void ieee80211_txq_set_params(struct ieee80211_local *local) +void ieee80211_txq_set_params(struct ieee80211_local *local, int radio_idx) { if (local->hw.wiphy->txq_limit) local->fq.limit = local->hw.wiphy->txq_limit; @@ -1605,7 +1606,7 @@ int ieee80211_txq_setup_flows(struct ieee80211_local *local) for (i = 0; i < fq->flows_cnt; i++) codel_vars_init(&local->cvars[i]); - ieee80211_txq_set_params(local); + ieee80211_txq_set_params(local, -1); return 0; } @@ -5016,12 +5017,25 @@ static void ieee80211_set_beacon_cntdwn(struct ieee80211_sub_if_data *sdata, } } -static u8 __ieee80211_beacon_update_cntdwn(struct beacon_data *beacon) +static u8 __ieee80211_beacon_update_cntdwn(struct ieee80211_link_data *link, + struct beacon_data *beacon) { - beacon->cntdwn_current_counter--; + if (beacon->cntdwn_current_counter == 1) { + /* + * Channel switch handling is done by a worker thread while + * beacons get pulled from hardware timers. It's therefore + * possible that software threads are slow enough to not be + * able to complete CSA handling in a single beacon interval, + * in which case we get here. There isn't much to do about + * it, other than letting the user know that the AP isn't + * behaving correctly. + */ + link_err_once(link, + "beacon TX faster than countdown (channel/color switch) completion\n"); + return 0; + } - /* the counter should never reach 0 */ - WARN_ON_ONCE(!beacon->cntdwn_current_counter); + beacon->cntdwn_current_counter--; return beacon->cntdwn_current_counter; } @@ -5052,7 +5066,7 @@ u8 ieee80211_beacon_update_cntdwn(struct ieee80211_vif *vif, unsigned int link_i if (!beacon) goto unlock; - count = __ieee80211_beacon_update_cntdwn(beacon); + count = __ieee80211_beacon_update_cntdwn(link, beacon); unlock: rcu_read_unlock(); @@ -5450,7 +5464,7 @@ __ieee80211_beacon_get(struct ieee80211_hw *hw, if (beacon->cntdwn_counter_offsets[0]) { if (!is_template) - __ieee80211_beacon_update_cntdwn(beacon); + __ieee80211_beacon_update_cntdwn(link, beacon); ieee80211_set_beacon_cntdwn(sdata, beacon, link); } @@ -5482,7 +5496,7 @@ __ieee80211_beacon_get(struct ieee80211_hw *hw, * for now we leave it consistent with overall * mac80211's behavior. 
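The reworked __ieee80211_beacon_update_cntdwn() above refuses to let the CSA/color-change countdown underflow: once the counter reaches 1 it stays there, a once-only error is logged, and 0 is returned, since the switch worker may simply not have run yet. A minimal sketch of that "saturate at 1 and warn once" behaviour:

#include <stdbool.h>
#include <stdio.h>

/* decrement a countdown but never let it pass 1; complain only once */
static unsigned int update_cntdwn(unsigned int *counter, bool *warned)
{
	if (*counter == 1) {
		if (!*warned) {
			fprintf(stderr, "countdown stuck: switch not completed yet\n");
			*warned = true;
		}
		return 0;
	}
	return --*counter;
}

int main(void)
{
	unsigned int cnt = 3;
	bool warned = false;

	for (int i = 0; i < 5; i++)
		printf("-> %u\n", update_cntdwn(&cnt, &warned)); /* 2 1 0 0 0 */
	return 0;
}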
*/ - __ieee80211_beacon_update_cntdwn(beacon); + __ieee80211_beacon_update_cntdwn(link, beacon); ieee80211_set_beacon_cntdwn(sdata, beacon, link); } diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 27d414efa3fd..c0ebf61da40f 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -1756,6 +1756,7 @@ int ieee80211_reconfig(struct ieee80211_local *local) bool sched_scan_stopped = false; bool suspended = local->suspended; bool in_reconfig = false; + u32 rts_threshold; lockdep_assert_wiphy(local->hw.wiphy); @@ -1826,13 +1827,20 @@ int ieee80211_reconfig(struct ieee80211_local *local) } /* setup fragmentation threshold */ - drv_set_frag_threshold(local, hw->wiphy->frag_threshold); + drv_set_frag_threshold(local, -1, hw->wiphy->frag_threshold); /* setup RTS threshold */ - drv_set_rts_threshold(local, hw->wiphy->rts_threshold); + if (hw->wiphy->n_radio > 0) { + for (i = 0; i < hw->wiphy->n_radio; i++) { + rts_threshold = hw->wiphy->radio_cfg[i].rts_threshold; + drv_set_rts_threshold(local, i, rts_threshold); + } + } else { + drv_set_rts_threshold(local, -1, hw->wiphy->rts_threshold); + } /* reset coverage class */ - drv_set_coverage_class(local, hw->wiphy->coverage_class); + drv_set_coverage_class(local, -1, hw->wiphy->coverage_class); ieee80211_led_radio(local, true); ieee80211_mod_tpt_led_trig(local, @@ -1890,11 +1898,11 @@ int ieee80211_reconfig(struct ieee80211_local *local) ieee80211_assign_chanctx(local, sdata, &sdata->deflink); /* reconfigure hardware */ - ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_LISTEN_INTERVAL | - IEEE80211_CONF_CHANGE_MONITOR | - IEEE80211_CONF_CHANGE_PS | - IEEE80211_CONF_CHANGE_RETRY_LIMITS | - IEEE80211_CONF_CHANGE_IDLE); + ieee80211_hw_config(local, -1, IEEE80211_CONF_CHANGE_LISTEN_INTERVAL | + IEEE80211_CONF_CHANGE_MONITOR | + IEEE80211_CONF_CHANGE_PS | + IEEE80211_CONF_CHANGE_RETRY_LIMITS | + IEEE80211_CONF_CHANGE_IDLE); ieee80211_configure_filter(local); @@ -3265,14 +3273,24 @@ int ieee80211_put_srates_elem(struct sk_buff *skb, return 0; } -int ieee80211_ave_rssi(struct ieee80211_vif *vif) +int ieee80211_ave_rssi(struct ieee80211_vif *vif, int link_id) { struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); + struct ieee80211_link_data *link_data; if (WARN_ON_ONCE(sdata->vif.type != NL80211_IFTYPE_STATION)) return 0; - return -ewma_beacon_signal_read(&sdata->deflink.u.mgd.ave_beacon_signal); + if (link_id < 0) + link_data = &sdata->deflink; + else + link_data = wiphy_dereference(sdata->local->hw.wiphy, + sdata->link[link_id]); + + if (WARN_ON_ONCE(!link_data)) + return -99; + + return -ewma_beacon_signal_read(&link_data->u.mgd.ave_beacon_signal); } EXPORT_SYMBOL_GPL(ieee80211_ave_rssi); @@ -3953,6 +3971,33 @@ static u8 ieee80211_chanctx_radar_detect(struct ieee80211_local *local, return radar_detect; } +bool ieee80211_is_radio_idx_in_scan_req(struct wiphy *wiphy, + struct cfg80211_scan_request *scan_req, + int radio_idx) +{ + struct ieee80211_channel *chan; + int i, chan_radio_idx; + + for (i = 0; i < scan_req->n_channels; i++) { + chan = scan_req->channels[i]; + chan_radio_idx = cfg80211_get_radio_idx_by_chan(wiphy, chan); + /* + * The chan_radio_idx should be valid since it's taken from a + * valid scan request. + * However, if chan_radio_idx is unexpectedly invalid (negative), + * we take a conservative approach and assume the scan request + * might use the specified radio_idx. Hence, return true. 
+ */ + if (WARN_ON(chan_radio_idx < 0)) + return true; + + if (chan_radio_idx == radio_idx) + return true; + } + + return false; +} + static u32 __ieee80211_get_radio_mask(struct ieee80211_sub_if_data *sdata) { diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c index d536c97144e9..47d7dfd9ad09 100644 --- a/net/mpls/af_mpls.c +++ b/net/mpls/af_mpls.c @@ -81,8 +81,8 @@ static struct mpls_route *mpls_route_input_rcu(struct net *net, unsigned index) if (index < net->mpls.platform_labels) { struct mpls_route __rcu **platform_label = - rcu_dereference(net->mpls.platform_label); - rt = rcu_dereference(platform_label[index]); + rcu_dereference_rtnl(net->mpls.platform_label); + rt = rcu_dereference_rtnl(platform_label[index]); } return rt; } diff --git a/net/mptcp/pm.c b/net/mptcp/pm.c index 1306d4dc287b..feb01747d7d8 100644 --- a/net/mptcp/pm.c +++ b/net/mptcp/pm.c @@ -270,7 +270,8 @@ int mptcp_pm_mp_prio_send_ack(struct mptcp_sock *msk, static void mptcp_pm_add_timer(struct timer_list *timer) { - struct mptcp_pm_add_entry *entry = from_timer(entry, timer, add_timer); + struct mptcp_pm_add_entry *entry = timer_container_of(entry, timer, + add_timer); struct mptcp_sock *msk = entry->sock; struct sock *sk = (struct sock *)msk; diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index 0749733ea897..edf14c2c2062 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -2203,8 +2203,8 @@ out_err: static void mptcp_retransmit_timer(struct timer_list *t) { - struct inet_connection_sock *icsk = from_timer(icsk, t, - icsk_retransmit_timer); + struct inet_connection_sock *icsk = timer_container_of(icsk, t, + icsk_retransmit_timer); struct sock *sk = &icsk->icsk_inet.sk; struct mptcp_sock *msk = mptcp_sk(sk); @@ -2223,7 +2223,7 @@ static void mptcp_retransmit_timer(struct timer_list *t) static void mptcp_tout_timer(struct timer_list *t) { - struct sock *sk = from_timer(sk, t, sk_timer); + struct sock *sk = timer_container_of(sk, t, sk_timer); mptcp_schedule_work(sk); sock_put(sk); diff --git a/net/ncsi/internal.h b/net/ncsi/internal.h index e76c6de0c784..adee6dcabdc3 100644 --- a/net/ncsi/internal.h +++ b/net/ncsi/internal.h @@ -110,7 +110,7 @@ struct ncsi_channel_version { u8 update; /* NCSI version update */ char alpha1; /* NCSI version alpha1 */ char alpha2; /* NCSI version alpha2 */ - u8 fw_name[12]; /* Firmware name string */ + u8 fw_name[12 + 1]; /* Firmware name string */ u32 fw_version; /* Firmware version */ u16 pci_ids[4]; /* PCI identification */ u32 mf_id; /* Manufacture ID */ diff --git a/net/ncsi/ncsi-manage.c b/net/ncsi/ncsi-manage.c index b36947063783..446e4e3b9553 100644 --- a/net/ncsi/ncsi-manage.c +++ b/net/ncsi/ncsi-manage.c @@ -88,7 +88,7 @@ report: static void ncsi_channel_monitor(struct timer_list *t) { - struct ncsi_channel *nc = from_timer(nc, t, monitor.timer); + struct ncsi_channel *nc = timer_container_of(nc, t, monitor.timer); struct ncsi_package *np = nc->package; struct ncsi_dev_priv *ndp = np->ndp; struct ncsi_channel_mode *ncm; @@ -430,7 +430,7 @@ struct ncsi_dev *ncsi_find_dev(struct net_device *dev) static void ncsi_request_timeout(struct timer_list *t) { - struct ncsi_request *nr = from_timer(nr, t, timer); + struct ncsi_request *nr = timer_container_of(nr, t, timer); struct ncsi_dev_priv *ndp = nr->ndp; struct ncsi_cmd_pkt *cmd; struct ncsi_package *np; diff --git a/net/ncsi/ncsi-rsp.c b/net/ncsi/ncsi-rsp.c index 472cc68ad86f..271ec6c3929e 100644 --- a/net/ncsi/ncsi-rsp.c +++ b/net/ncsi/ncsi-rsp.c @@ -775,6 +775,7 @@ static int ncsi_rsp_handler_gvi(struct 
ncsi_request *nr) ncv->alpha1 = rsp->alpha1; ncv->alpha2 = rsp->alpha2; memcpy(ncv->fw_name, rsp->fw_name, 12); + ncv->fw_name[12] = '\0'; ncv->fw_version = ntohl(rsp->fw_version); for (i = 0; i < ARRAY_SIZE(ncv->pci_ids); i++) ncv->pci_ids[i] = ntohs(rsp->pci_ids[i]); diff --git a/net/netfilter/ipset/ip_set_bitmap_gen.h b/net/netfilter/ipset/ip_set_bitmap_gen.h index 6ae042f702d2..798c7993635e 100644 --- a/net/netfilter/ipset/ip_set_bitmap_gen.h +++ b/net/netfilter/ipset/ip_set_bitmap_gen.h @@ -264,7 +264,7 @@ out: static void mtype_gc(struct timer_list *t) { - struct mtype *map = from_timer(map, t, gc); + struct mtype *map = timer_container_of(map, t, gc); struct ip_set *set = map->set; void *x; u32 id; diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c index db794fe1300e..13c7a08aa868 100644 --- a/net/netfilter/ipset/ip_set_list_set.c +++ b/net/netfilter/ipset/ip_set_list_set.c @@ -571,7 +571,7 @@ static const struct ip_set_type_variant set_variant = { static void list_set_gc(struct timer_list *t) { - struct list_set *map = from_timer(map, t, gc); + struct list_set *map = timer_container_of(map, t, gc); struct ip_set *set = map->set; spin_lock_bh(&set->lock); diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c index 8699944c0baf..44b2ad695c15 100644 --- a/net/netfilter/ipvs/ip_vs_conn.c +++ b/net/netfilter/ipvs/ip_vs_conn.c @@ -846,7 +846,7 @@ static void ip_vs_conn_del_put(struct ip_vs_conn *cp) static void ip_vs_conn_expire(struct timer_list *t) { - struct ip_vs_conn *cp = from_timer(cp, t, timer); + struct ip_vs_conn *cp = timer_container_of(cp, t, timer); struct netns_ipvs *ipvs = cp->ipvs; /* diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index 7d5b7418f8c7..6a6fc4478533 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c @@ -1331,7 +1331,8 @@ ip_vs_del_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest) static void ip_vs_dest_trash_expire(struct timer_list *t) { - struct netns_ipvs *ipvs = from_timer(ipvs, t, dest_trash_timer); + struct netns_ipvs *ipvs = timer_container_of(ipvs, t, + dest_trash_timer); struct ip_vs_dest *dest, *next; unsigned long now = jiffies; diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c index 2423513d701d..156181a3bacd 100644 --- a/net/netfilter/ipvs/ip_vs_lblc.c +++ b/net/netfilter/ipvs/ip_vs_lblc.c @@ -292,7 +292,8 @@ static inline void ip_vs_lblc_full_check(struct ip_vs_service *svc) */ static void ip_vs_lblc_check_expire(struct timer_list *t) { - struct ip_vs_lblc_table *tbl = from_timer(tbl, t, periodic_timer); + struct ip_vs_lblc_table *tbl = timer_container_of(tbl, t, + periodic_timer); struct ip_vs_service *svc = tbl->svc; unsigned long now = jiffies; int goal; diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c index cdb1d4bf6761..a021e6aba3d7 100644 --- a/net/netfilter/ipvs/ip_vs_lblcr.c +++ b/net/netfilter/ipvs/ip_vs_lblcr.c @@ -456,7 +456,8 @@ static inline void ip_vs_lblcr_full_check(struct ip_vs_service *svc) */ static void ip_vs_lblcr_check_expire(struct timer_list *t) { - struct ip_vs_lblcr_table *tbl = from_timer(tbl, t, periodic_timer); + struct ip_vs_lblcr_table *tbl = timer_container_of(tbl, t, + periodic_timer); struct ip_vs_service *svc = tbl->svc; unsigned long now = jiffies; int goal; diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c index 21d22fa22e4e..cfc2daa3fc7f 100644 --- 
a/net/netfilter/nf_conntrack_expect.c +++ b/net/netfilter/nf_conntrack_expect.c @@ -71,7 +71,7 @@ EXPORT_SYMBOL_GPL(nf_ct_unlink_expect_report); static void nf_ct_expectation_timed_out(struct timer_list *t) { - struct nf_conntrack_expect *exp = from_timer(exp, t, timeout); + struct nf_conntrack_expect *exp = timer_container_of(exp, t, timeout); spin_lock_bh(&nf_conntrack_expect_lock); nf_ct_unlink_expect(exp); diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c index aad84aabd7f1..f391cd267922 100644 --- a/net/netfilter/nf_nat_core.c +++ b/net/netfilter/nf_nat_core.c @@ -248,7 +248,7 @@ static noinline bool nf_nat_used_tuple_new(const struct nf_conntrack_tuple *tuple, const struct nf_conn *ignored_ct) { - static const unsigned long uses_nat = IPS_NAT_MASK | IPS_SEQ_ADJUST_BIT; + static const unsigned long uses_nat = IPS_NAT_MASK | IPS_SEQ_ADJUST; const struct nf_conntrack_tuple_hash *thash; const struct nf_conntrack_zone *zone; struct nf_conn *ct; @@ -287,8 +287,14 @@ nf_nat_used_tuple_new(const struct nf_conntrack_tuple *tuple, zone = nf_ct_zone(ignored_ct); thash = nf_conntrack_find_get(net, zone, tuple); - if (unlikely(!thash)) /* clashing entry went away */ - return false; + if (unlikely(!thash)) { + struct nf_conntrack_tuple reply; + + nf_ct_invert_tuple(&reply, tuple); + thash = nf_conntrack_find_get(net, zone, &reply); + if (!thash) /* clashing entry went away */ + return false; + } ct = nf_ct_tuplehash_to_ctrack(thash); diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c index 882962f3c84d..bfcb9cd335bf 100644 --- a/net/netfilter/nfnetlink_log.c +++ b/net/netfilter/nfnetlink_log.c @@ -390,7 +390,7 @@ __nfulnl_flush(struct nfulnl_instance *inst) static void nfulnl_timer(struct timer_list *t) { - struct nfulnl_instance *inst = from_timer(inst, t, timer); + struct nfulnl_instance *inst = timer_container_of(inst, t, timer); spin_lock_bh(&inst->lock); if (inst->skb) diff --git a/net/netfilter/nft_set_pipapo_avx2.c b/net/netfilter/nft_set_pipapo_avx2.c index c15db28c5ebc..be7c16c79f71 100644 --- a/net/netfilter/nft_set_pipapo_avx2.c +++ b/net/netfilter/nft_set_pipapo_avx2.c @@ -1114,6 +1114,25 @@ bool nft_pipapo_avx2_estimate(const struct nft_set_desc *desc, u32 features, } /** + * pipapo_resmap_init_avx2() - Initialise result map before first use + * @m: Matching data, including mapping table + * @res_map: Result map + * + * Like pipapo_resmap_init() but do not set start map bits covered by the first field. + */ +static inline void pipapo_resmap_init_avx2(const struct nft_pipapo_match *m, unsigned long *res_map) +{ + const struct nft_pipapo_field *f = m->f; + int i; + + /* Starting map doesn't need to be set to all-ones for this implementation, + * but we do need to zero the remaining bits, if any. + */ + for (i = f->bsize; i < m->bsize_max; i++) + res_map[i] = 0ul; +} + +/** * nft_pipapo_avx2_lookup() - Lookup function for AVX2 implementation * @net: Network namespace * @set: nftables API set representation @@ -1171,7 +1190,7 @@ bool nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set, res = scratch->map + (map_index ? m->bsize_max : 0); fill = scratch->map + (map_index ? 
0 : m->bsize_max); - /* Starting map doesn't need to be set for this implementation */ + pipapo_resmap_init_avx2(m, res); nft_pipapo_avx2_prepare(); diff --git a/net/netfilter/xt_IDLETIMER.c b/net/netfilter/xt_IDLETIMER.c index 9082155ee558..d73957592c9d 100644 --- a/net/netfilter/xt_IDLETIMER.c +++ b/net/netfilter/xt_IDLETIMER.c @@ -100,7 +100,7 @@ static void idletimer_tg_work(struct work_struct *work) static void idletimer_tg_expired(struct timer_list *t) { - struct idletimer_tg *timer = from_timer(timer, t, timer); + struct idletimer_tg *timer = timer_container_of(timer, t, timer); pr_debug("timer %s expired\n", timer->attr.attr.name); diff --git a/net/netfilter/xt_LED.c b/net/netfilter/xt_LED.c index 8a80fd76fe45..90dcf088071a 100644 --- a/net/netfilter/xt_LED.c +++ b/net/netfilter/xt_LED.c @@ -72,8 +72,9 @@ led_tg(struct sk_buff *skb, const struct xt_action_param *par) static void led_timeout_callback(struct timer_list *t) { - struct xt_led_info_internal *ledinternal = from_timer(ledinternal, t, - timer); + struct xt_led_info_internal *ledinternal = timer_container_of(ledinternal, + t, + timer); led_trigger_event(&ledinternal->netfilter_led_trigger, LED_OFF); } diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c index 6ea16138582c..33b77084a4e5 100644 --- a/net/netlabel/netlabel_kapi.c +++ b/net/netlabel/netlabel_kapi.c @@ -1165,8 +1165,10 @@ int netlbl_conn_setattr(struct sock *sk, break; #if IS_ENABLED(CONFIG_IPV6) case AF_INET6: - if (sk->sk_family != AF_INET6) - return -EAFNOSUPPORT; + if (sk->sk_family != AF_INET6) { + ret_val = -EAFNOSUPPORT; + goto conn_setattr_return; + } addr6 = (struct sockaddr_in6 *)addr; entry = netlbl_domhsh_getentry_af6(secattr->domain, diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c index 6ee148f0e6d0..3331669d8e33 100644 --- a/net/netrom/af_netrom.c +++ b/net/netrom/af_netrom.c @@ -240,7 +240,7 @@ void nr_destroy_socket(struct sock *); */ static void nr_destroy_timer(struct timer_list *t) { - struct sock *sk = from_timer(sk, t, sk_timer); + struct sock *sk = timer_container_of(sk, t, sk_timer); bh_lock_sock(sk); sock_hold(sk); nr_destroy_socket(sk); diff --git a/net/netrom/nr_timer.c b/net/netrom/nr_timer.c index 5e3ca068f04e..b3a62b1f3a09 100644 --- a/net/netrom/nr_timer.c +++ b/net/netrom/nr_timer.c @@ -111,7 +111,7 @@ int nr_t1timer_running(struct sock *sk) static void nr_heartbeat_expiry(struct timer_list *t) { - struct sock *sk = from_timer(sk, t, sk_timer); + struct sock *sk = timer_container_of(sk, t, sk_timer); struct nr_sock *nr = nr_sk(sk); bh_lock_sock(sk); @@ -152,7 +152,7 @@ out: static void nr_t2timer_expiry(struct timer_list *t) { - struct nr_sock *nr = from_timer(nr, t, t2timer); + struct nr_sock *nr = timer_container_of(nr, t, t2timer); struct sock *sk = &nr->sock; bh_lock_sock(sk); @@ -166,7 +166,7 @@ static void nr_t2timer_expiry(struct timer_list *t) static void nr_t4timer_expiry(struct timer_list *t) { - struct nr_sock *nr = from_timer(nr, t, t4timer); + struct nr_sock *nr = timer_container_of(nr, t, t4timer); struct sock *sk = &nr->sock; bh_lock_sock(sk); @@ -177,7 +177,7 @@ static void nr_t4timer_expiry(struct timer_list *t) static void nr_idletimer_expiry(struct timer_list *t) { - struct nr_sock *nr = from_timer(nr, t, idletimer); + struct nr_sock *nr = timer_container_of(nr, t, idletimer); struct sock *sk = &nr->sock; bh_lock_sock(sk); @@ -206,7 +206,7 @@ static void nr_idletimer_expiry(struct timer_list *t) static void nr_t1timer_expiry(struct timer_list *t) { - struct nr_sock *nr = 
from_timer(nr, t, t1timer); + struct nr_sock *nr = timer_container_of(nr, t, t1timer); struct sock *sk = &nr->sock; bh_lock_sock(sk); diff --git a/net/nfc/core.c b/net/nfc/core.c index 75ed8a9146ba..ae1c842f9c64 100644 --- a/net/nfc/core.c +++ b/net/nfc/core.c @@ -1010,7 +1010,7 @@ exit: static void nfc_check_pres_timeout(struct timer_list *t) { - struct nfc_dev *dev = from_timer(dev, t, check_pres_timer); + struct nfc_dev *dev = timer_container_of(dev, t, check_pres_timer); schedule_work(&dev->check_pres_work); } diff --git a/net/nfc/hci/core.c b/net/nfc/hci/core.c index aa493344d93e..8618d57c23da 100644 --- a/net/nfc/hci/core.c +++ b/net/nfc/hci/core.c @@ -441,7 +441,7 @@ exit_noskb: static void nfc_hci_cmd_timeout(struct timer_list *t) { - struct nfc_hci_dev *hdev = from_timer(hdev, t, cmd_timer); + struct nfc_hci_dev *hdev = timer_container_of(hdev, t, cmd_timer); schedule_work(&hdev->msg_tx_work); } diff --git a/net/nfc/hci/llc_shdlc.c b/net/nfc/hci/llc_shdlc.c index ce9c683a3ead..4fc37894860c 100644 --- a/net/nfc/hci/llc_shdlc.c +++ b/net/nfc/hci/llc_shdlc.c @@ -564,14 +564,14 @@ static void llc_shdlc_handle_send_queue(struct llc_shdlc *shdlc) static void llc_shdlc_connect_timeout(struct timer_list *t) { - struct llc_shdlc *shdlc = from_timer(shdlc, t, connect_timer); + struct llc_shdlc *shdlc = timer_container_of(shdlc, t, connect_timer); schedule_work(&shdlc->sm_work); } static void llc_shdlc_t1_timeout(struct timer_list *t) { - struct llc_shdlc *shdlc = from_timer(shdlc, t, t1_timer); + struct llc_shdlc *shdlc = timer_container_of(shdlc, t, t1_timer); pr_debug("SoftIRQ: need to send ack\n"); @@ -580,7 +580,7 @@ static void llc_shdlc_t1_timeout(struct timer_list *t) static void llc_shdlc_t2_timeout(struct timer_list *t) { - struct llc_shdlc *shdlc = from_timer(shdlc, t, t2_timer); + struct llc_shdlc *shdlc = timer_container_of(shdlc, t, t2_timer); pr_debug("SoftIRQ: need to retransmit\n"); diff --git a/net/nfc/llcp_core.c b/net/nfc/llcp_core.c index 27e863f96ed1..beeb3b4d28ca 100644 --- a/net/nfc/llcp_core.c +++ b/net/nfc/llcp_core.c @@ -243,7 +243,8 @@ static void nfc_llcp_timeout_work(struct work_struct *work) static void nfc_llcp_symm_timer(struct timer_list *t) { - struct nfc_llcp_local *local = from_timer(local, t, link_timer); + struct nfc_llcp_local *local = timer_container_of(local, t, + link_timer); pr_err("SYMM timeout\n"); @@ -286,7 +287,8 @@ static void nfc_llcp_sdreq_timeout_work(struct work_struct *work) static void nfc_llcp_sdreq_timer(struct timer_list *t) { - struct nfc_llcp_local *local = from_timer(local, t, sdreq_timer); + struct nfc_llcp_local *local = timer_container_of(local, t, + sdreq_timer); schedule_work(&local->sdreq_timeout_work); } diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c index 0171bf3c7016..fc921cd2cdff 100644 --- a/net/nfc/nci/core.c +++ b/net/nfc/nci/core.c @@ -610,7 +610,7 @@ static int nci_close_device(struct nci_dev *ndev) /* NCI command timer function */ static void nci_cmd_timer(struct timer_list *t) { - struct nci_dev *ndev = from_timer(ndev, t, cmd_timer); + struct nci_dev *ndev = timer_container_of(ndev, t, cmd_timer); atomic_set(&ndev->cmd_cnt, 1); queue_work(ndev->cmd_wq, &ndev->cmd_work); @@ -619,7 +619,7 @@ static void nci_cmd_timer(struct timer_list *t) /* NCI data exchange timer function */ static void nci_data_timer(struct timer_list *t) { - struct nci_dev *ndev = from_timer(ndev, t, data_timer); + struct nci_dev *ndev = timer_container_of(ndev, t, data_timer); set_bit(NCI_DATA_EXCHANGE_TO, &ndev->flags); 
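[Editor's note] The netrom, nfc and nci hunks above are part of the tree-wide rename of from_timer() to timer_container_of(); both map a struct timer_list pointer back to the structure that embeds it. A minimal sketch of the callback pattern with a hypothetical my_dev structure (the names are illustrative, not from any of these patches):

        struct my_dev {
                struct timer_list probe_timer;
                unsigned int probes_sent;
        };

        /* Recover the enclosing my_dev from its timer_list member. */
        static void my_dev_probe_expired(struct timer_list *t)
        {
                struct my_dev *dev = timer_container_of(dev, t, probe_timer);

                dev->probes_sent++;
                mod_timer(&dev->probe_timer, jiffies + HZ);     /* re-arm */
        }

        static void my_dev_start(struct my_dev *dev)
        {
                timer_setup(&dev->probe_timer, my_dev_probe_expired, 0);
                mod_timer(&dev->probe_timer, jiffies + HZ);
        }

The first macro argument names the variable being initialised, the second is the callback's timer pointer, and the third is the member name, exactly as in the conversions above.
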
queue_work(ndev->rx_wq, &ndev->rx_work); diff --git a/net/nfc/nci/uart.c b/net/nfc/nci/uart.c index ed1508a9e093..aab107727f18 100644 --- a/net/nfc/nci/uart.c +++ b/net/nfc/nci/uart.c @@ -119,22 +119,22 @@ static int nci_uart_set_driver(struct tty_struct *tty, unsigned int driver) memcpy(nu, nci_uart_drivers[driver], sizeof(struct nci_uart)); nu->tty = tty; - tty->disc_data = nu; skb_queue_head_init(&nu->tx_q); INIT_WORK(&nu->write_work, nci_uart_write_work); spin_lock_init(&nu->rx_lock); ret = nu->ops.open(nu); if (ret) { - tty->disc_data = NULL; kfree(nu); + return ret; } else if (!try_module_get(nu->owner)) { nu->ops.close(nu); - tty->disc_data = NULL; kfree(nu); return -ENOENT; } - return ret; + tty->disc_data = nu; + + return 0; } /* ------ LDISC part ------ */ diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c index 6a40b8d0350d..a18e2c503da6 100644 --- a/net/nfc/netlink.c +++ b/net/nfc/netlink.c @@ -1192,7 +1192,7 @@ static int nfc_genl_llc_sdreq(struct sk_buff *skb, struct genl_info *info) continue; uri = nla_data(sdp_attrs[NFC_SDP_ATTR_URI]); - if (uri == NULL || *uri == 0) + if (*uri == 0) continue; tid = local->sdreq_next_tid++; @@ -1540,10 +1540,6 @@ static int nfc_genl_se_io(struct sk_buff *skb, struct genl_info *info) } apdu = nla_data(info->attrs[NFC_ATTR_SE_APDU]); - if (!apdu) { - rc = -EINVAL; - goto put_dev; - } ctx = kzalloc(sizeof(struct se_io_ctx), GFP_KERNEL); if (!ctx) { diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c index e7269a3eec79..3add108340bf 100644 --- a/net/openvswitch/actions.c +++ b/net/openvswitch/actions.c @@ -39,16 +39,14 @@ #include "flow_netlink.h" #include "openvswitch_trace.h" -DEFINE_PER_CPU(struct ovs_pcpu_storage, ovs_pcpu_storage) = { - .bh_lock = INIT_LOCAL_LOCK(bh_lock), -}; +struct ovs_pcpu_storage __percpu *ovs_pcpu_storage; /* Make a clone of the 'key', using the pre-allocated percpu 'flow_keys' * space. Return NULL if out of key spaces. */ static struct sw_flow_key *clone_key(const struct sw_flow_key *key_) { - struct ovs_pcpu_storage *ovs_pcpu = this_cpu_ptr(&ovs_pcpu_storage); + struct ovs_pcpu_storage *ovs_pcpu = this_cpu_ptr(ovs_pcpu_storage); struct action_flow_keys *keys = &ovs_pcpu->flow_keys; int level = ovs_pcpu->exec_level; struct sw_flow_key *key = NULL; @@ -94,7 +92,7 @@ static struct deferred_action *add_deferred_actions(struct sk_buff *skb, const struct nlattr *actions, const int actions_len) { - struct action_fifo *fifo = this_cpu_ptr(&ovs_pcpu_storage.action_fifos); + struct action_fifo *fifo = this_cpu_ptr(&ovs_pcpu_storage->action_fifos); struct deferred_action *da; da = action_fifo_put(fifo); @@ -755,7 +753,7 @@ static int set_sctp(struct sk_buff *skb, struct sw_flow_key *flow_key, static int ovs_vport_output(struct net *net, struct sock *sk, struct sk_buff *skb) { - struct ovs_frag_data *data = this_cpu_ptr(&ovs_pcpu_storage.frag_data); + struct ovs_frag_data *data = this_cpu_ptr(&ovs_pcpu_storage->frag_data); struct vport *vport = data->vport; if (skb_cow_head(skb, data->l2_len) < 0) { @@ -807,7 +805,7 @@ static void prepare_frag(struct vport *vport, struct sk_buff *skb, unsigned int hlen = skb_network_offset(skb); struct ovs_frag_data *data; - data = this_cpu_ptr(&ovs_pcpu_storage.frag_data); + data = this_cpu_ptr(&ovs_pcpu_storage->frag_data); data->dst = skb->_skb_refdst; data->vport = vport; data->cb = *OVS_CB(skb); @@ -1566,16 +1564,15 @@ static int clone_execute(struct datapath *dp, struct sk_buff *skb, clone = clone_flow_key ? 
clone_key(key) : key; if (clone) { int err = 0; - if (actions) { /* Sample action */ if (clone_flow_key) - __this_cpu_inc(ovs_pcpu_storage.exec_level); + __this_cpu_inc(ovs_pcpu_storage->exec_level); err = do_execute_actions(dp, skb, clone, actions, len); if (clone_flow_key) - __this_cpu_dec(ovs_pcpu_storage.exec_level); + __this_cpu_dec(ovs_pcpu_storage->exec_level); } else { /* Recirc action */ clone->recirc_id = recirc_id; ovs_dp_process_packet(skb, clone); @@ -1611,7 +1608,7 @@ static int clone_execute(struct datapath *dp, struct sk_buff *skb, static void process_deferred_actions(struct datapath *dp) { - struct action_fifo *fifo = this_cpu_ptr(&ovs_pcpu_storage.action_fifos); + struct action_fifo *fifo = this_cpu_ptr(&ovs_pcpu_storage->action_fifos); /* Do not touch the FIFO in case there is no deferred actions. */ if (action_fifo_is_empty(fifo)) @@ -1642,7 +1639,7 @@ int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb, { int err, level; - level = __this_cpu_inc_return(ovs_pcpu_storage.exec_level); + level = __this_cpu_inc_return(ovs_pcpu_storage->exec_level); if (unlikely(level > OVS_RECURSION_LIMIT)) { net_crit_ratelimited("ovs: recursion limit reached on datapath %s, probable configuration error\n", ovs_dp_name(dp)); @@ -1659,6 +1656,6 @@ int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb, process_deferred_actions(dp); out: - __this_cpu_dec(ovs_pcpu_storage.exec_level); + __this_cpu_dec(ovs_pcpu_storage->exec_level); return err; } diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index 6a304ae2d959..b990dc83504f 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c @@ -244,7 +244,7 @@ void ovs_dp_detach_port(struct vport *p) /* Must be called with rcu_read_lock. */ void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key) { - struct ovs_pcpu_storage *ovs_pcpu = this_cpu_ptr(&ovs_pcpu_storage); + struct ovs_pcpu_storage *ovs_pcpu = this_cpu_ptr(ovs_pcpu_storage); const struct vport *p = OVS_CB(skb)->input_vport; struct datapath *dp = p->dp; struct sw_flow *flow; @@ -299,7 +299,7 @@ void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key) * avoided. 
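[Editor's note] The openvswitch hunks around this point convert ovs_pcpu_storage from a statically defined per-CPU variable to a pointer returned by alloc_percpu(), which is why every this_cpu_ptr()/local_lock_nested_bh() user now dereferences the pointer and why dp_init() must initialise the local_lock on each CPU by hand. A minimal sketch of that allocation pattern with a hypothetical pkt_scratch structure (standalone, not the OVS code itself):

        struct pkt_scratch {
                local_lock_t lock;
                int depth;
        };

        static struct pkt_scratch __percpu *pkt_scratch;

        static int pkt_scratch_alloc(void)
        {
                unsigned int cpu;

                pkt_scratch = alloc_percpu(struct pkt_scratch);
                if (!pkt_scratch)
                        return -ENOMEM;

                /* no static initialiser any more, so init the lock per CPU */
                for_each_possible_cpu(cpu)
                        local_lock_init(&per_cpu_ptr(pkt_scratch, cpu)->lock);
                return 0;
        }

        static void pkt_scratch_free(void)
        {
                free_percpu(pkt_scratch);
        }

Users then take this_cpu_ptr(pkt_scratch) (note: no leading '&') inside a BH-disabled or locally locked section, mirroring the ovs_pcpu_storage accesses above.
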
*/ if (IS_ENABLED(CONFIG_PREEMPT_RT) && ovs_pcpu->owner != current) { - local_lock_nested_bh(&ovs_pcpu_storage.bh_lock); + local_lock_nested_bh(&ovs_pcpu_storage->bh_lock); ovs_pcpu->owner = current; ovs_pcpu_locked = true; } @@ -310,7 +310,7 @@ void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key) ovs_dp_name(dp), error); if (ovs_pcpu_locked) { ovs_pcpu->owner = NULL; - local_unlock_nested_bh(&ovs_pcpu_storage.bh_lock); + local_unlock_nested_bh(&ovs_pcpu_storage->bh_lock); } stats_counter = &stats->n_hit; @@ -689,13 +689,13 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info) sf_acts = rcu_dereference(flow->sf_acts); local_bh_disable(); - local_lock_nested_bh(&ovs_pcpu_storage.bh_lock); + local_lock_nested_bh(&ovs_pcpu_storage->bh_lock); if (IS_ENABLED(CONFIG_PREEMPT_RT)) - this_cpu_write(ovs_pcpu_storage.owner, current); + this_cpu_write(ovs_pcpu_storage->owner, current); err = ovs_execute_actions(dp, packet, sf_acts, &flow->key); if (IS_ENABLED(CONFIG_PREEMPT_RT)) - this_cpu_write(ovs_pcpu_storage.owner, NULL); - local_unlock_nested_bh(&ovs_pcpu_storage.bh_lock); + this_cpu_write(ovs_pcpu_storage->owner, NULL); + local_unlock_nested_bh(&ovs_pcpu_storage->bh_lock); local_bh_enable(); rcu_read_unlock(); @@ -2744,6 +2744,28 @@ static struct drop_reason_list drop_reason_list_ovs = { .n_reasons = ARRAY_SIZE(ovs_drop_reasons), }; +static int __init ovs_alloc_percpu_storage(void) +{ + unsigned int cpu; + + ovs_pcpu_storage = alloc_percpu(*ovs_pcpu_storage); + if (!ovs_pcpu_storage) + return -ENOMEM; + + for_each_possible_cpu(cpu) { + struct ovs_pcpu_storage *ovs_pcpu; + + ovs_pcpu = per_cpu_ptr(ovs_pcpu_storage, cpu); + local_lock_init(&ovs_pcpu->bh_lock); + } + return 0; +} + +static void ovs_free_percpu_storage(void) +{ + free_percpu(ovs_pcpu_storage); +} + static int __init dp_init(void) { int err; @@ -2753,6 +2775,10 @@ static int __init dp_init(void) pr_info("Open vSwitch switching datapath\n"); + err = ovs_alloc_percpu_storage(); + if (err) + goto error; + err = ovs_internal_dev_rtnl_link_register(); if (err) goto error; @@ -2799,6 +2825,7 @@ error_flow_exit: error_unreg_rtnl_link: ovs_internal_dev_rtnl_link_unregister(); error: + ovs_free_percpu_storage(); return err; } @@ -2813,6 +2840,7 @@ static void dp_cleanup(void) ovs_vport_exit(); ovs_flow_exit(); ovs_internal_dev_rtnl_link_unregister(); + ovs_free_percpu_storage(); } module_init(dp_init); diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h index 1b5348b0f559..cfeb817a1889 100644 --- a/net/openvswitch/datapath.h +++ b/net/openvswitch/datapath.h @@ -220,7 +220,8 @@ struct ovs_pcpu_storage { struct task_struct *owner; local_lock_t bh_lock; }; -DECLARE_PER_CPU(struct ovs_pcpu_storage, ovs_pcpu_storage); + +extern struct ovs_pcpu_storage __percpu *ovs_pcpu_storage; /** * enum ovs_pkt_hash_types - hash info to include with a packet diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 20be2c47cf41..3d43f3eae759 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -722,7 +722,7 @@ static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc) static void prb_retire_rx_blk_timer_expired(struct timer_list *t) { struct packet_sock *po = - from_timer(po, t, rx_ring.prb_bdqc.retire_blk_timer); + timer_container_of(po, t, rx_ring.prb_bdqc.retire_blk_timer); struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(&po->rx_ring); unsigned int frozen; struct tpacket_block_desc *pbd; diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c index 
a4a668b88a8f..4e72b636a46a 100644 --- a/net/rose/af_rose.c +++ b/net/rose/af_rose.c @@ -345,7 +345,7 @@ void rose_destroy_socket(struct sock *); */ static void rose_destroy_timer(struct timer_list *t) { - struct sock *sk = from_timer(sk, t, sk_timer); + struct sock *sk = timer_container_of(sk, t, sk_timer); rose_destroy_socket(sk); } diff --git a/net/rose/rose_link.c b/net/rose/rose_link.c index 9f9629e6fdae..7746229fdc8c 100644 --- a/net/rose/rose_link.c +++ b/net/rose/rose_link.c @@ -78,7 +78,7 @@ static void rose_ftimer_expiry(struct timer_list *t) static void rose_t0timer_expiry(struct timer_list *t) { - struct rose_neigh *neigh = from_timer(neigh, t, t0timer); + struct rose_neigh *neigh = timer_container_of(neigh, t, t0timer); rose_transmit_restart_request(neigh); diff --git a/net/rose/rose_timer.c b/net/rose/rose_timer.c index 1525773e94aa..020369c49587 100644 --- a/net/rose/rose_timer.c +++ b/net/rose/rose_timer.c @@ -118,7 +118,7 @@ void rose_stop_idletimer(struct sock *sk) static void rose_heartbeat_expiry(struct timer_list *t) { - struct sock *sk = from_timer(sk, t, sk_timer); + struct sock *sk = timer_container_of(sk, t, sk_timer); struct rose_sock *rose = rose_sk(sk); bh_lock_sock(sk); @@ -163,7 +163,7 @@ out: static void rose_timer_expiry(struct timer_list *t) { - struct rose_sock *rose = from_timer(rose, t, timer); + struct rose_sock *rose = timer_container_of(rose, t, timer); struct sock *sk = &rose->sock; bh_lock_sock(sk); @@ -198,7 +198,7 @@ out: static void rose_idletimer_expiry(struct timer_list *t) { - struct rose_sock *rose = from_timer(rose, t, idletimer); + struct rose_sock *rose = timer_container_of(rose, t, idletimer); struct sock *sk = &rose->sock; bh_lock_sock(sk); diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c index e9e8f0ef3fd5..15067ff7b1f2 100644 --- a/net/rxrpc/call_object.c +++ b/net/rxrpc/call_object.c @@ -64,7 +64,7 @@ void rxrpc_poke_call(struct rxrpc_call *call, enum rxrpc_call_poke_trace what) static void rxrpc_call_timer_expired(struct timer_list *t) { - struct rxrpc_call *call = from_timer(call, t, timer); + struct rxrpc_call *call = timer_container_of(call, t, timer); _enter("%d", call->debug_id); diff --git a/net/rxrpc/insecure.c b/net/rxrpc/insecure.c index 1f7c136d6d0e..0a260df45d25 100644 --- a/net/rxrpc/insecure.c +++ b/net/rxrpc/insecure.c @@ -45,8 +45,9 @@ static void none_free_call_crypto(struct rxrpc_call *call) static bool none_validate_challenge(struct rxrpc_connection *conn, struct sk_buff *skb) { - return rxrpc_abort_conn(conn, skb, RX_PROTOCOL_ERROR, -EPROTO, - rxrpc_eproto_rxnull_challenge); + rxrpc_abort_conn(conn, skb, RX_PROTOCOL_ERROR, -EPROTO, + rxrpc_eproto_rxnull_challenge); + return true; } static int none_sendmsg_respond_to_challenge(struct sk_buff *challenge, diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c index 5c2580a07530..5693b41b093f 100644 --- a/net/sched/cls_flow.c +++ b/net/sched/cls_flow.c @@ -345,7 +345,7 @@ TC_INDIRECT_SCOPE int flow_classify(struct sk_buff *skb, static void flow_perturbation(struct timer_list *t) { - struct flow_filter *f = from_timer(f, t, perturb_timer); + struct flow_filter *f = timer_container_of(f, t, perturb_timer); get_random_bytes(&f->hashrnd, 4); if (f->perturb_period) diff --git a/net/sched/sch_ets.c b/net/sched/sch_ets.c index 2c069f0181c6..037f764822b9 100644 --- a/net/sched/sch_ets.c +++ b/net/sched/sch_ets.c @@ -661,7 +661,7 @@ static int ets_qdisc_change(struct Qdisc *sch, struct nlattr *opt, for (i = q->nbands; i < oldbands; i++) { if (i >= q->nstrict && 
q->classes[i].qdisc->q.qlen) list_del_init(&q->classes[i].alist); - qdisc_tree_flush_backlog(q->classes[i].qdisc); + qdisc_purge_queue(q->classes[i].qdisc); } WRITE_ONCE(q->nstrict, nstrict); memcpy(q->prio2band, priomap, sizeof(priomap)); diff --git a/net/sched/sch_fq_pie.c b/net/sched/sch_fq_pie.c index df7fac95ab15..b0e34daf1f75 100644 --- a/net/sched/sch_fq_pie.c +++ b/net/sched/sch_fq_pie.c @@ -384,7 +384,7 @@ flow_error: static void fq_pie_timer(struct timer_list *t) { - struct fq_pie_sched_data *q = from_timer(q, t, adapt_timer); + struct fq_pie_sched_data *q = timer_container_of(q, t, adapt_timer); unsigned long next, tupdate; struct Qdisc *sch = q->sch; spinlock_t *root_lock; /* to lock qdisc for probability calculations */ diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 08e0e3aff976..16afb834fe4a 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -496,7 +496,7 @@ EXPORT_SYMBOL(netif_tx_unlock); static void dev_watchdog(struct timer_list *t) { - struct net_device *dev = from_timer(dev, t, watchdog_timer); + struct net_device *dev = timer_container_of(dev, t, watchdog_timer); bool release = true; spin_lock(&dev->tx_global_lock); diff --git a/net/sched/sch_pie.c b/net/sched/sch_pie.c index ff49a6c97033..ad46ee3ed5a9 100644 --- a/net/sched/sch_pie.c +++ b/net/sched/sch_pie.c @@ -424,7 +424,7 @@ EXPORT_SYMBOL_GPL(pie_calculate_probability); static void pie_timer(struct timer_list *t) { - struct pie_sched_data *q = from_timer(q, t, adapt_timer); + struct pie_sched_data *q = timer_container_of(q, t, adapt_timer); struct Qdisc *sch = q->sch; spinlock_t *root_lock; diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c index cc30f7a32f1a..9e2b9a490db2 100644 --- a/net/sched/sch_prio.c +++ b/net/sched/sch_prio.c @@ -211,7 +211,7 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt, memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1); for (i = q->bands; i < oldbands; i++) - qdisc_tree_flush_backlog(q->queues[i]); + qdisc_purge_queue(q->queues[i]); for (i = oldbands; i < q->bands; i++) { q->queues[i] = queues[i]; diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c index 1ba3e0bba54f..479c42d11083 100644 --- a/net/sched/sch_red.c +++ b/net/sched/sch_red.c @@ -285,7 +285,7 @@ static int __red_change(struct Qdisc *sch, struct nlattr **tb, q->userbits = userbits; q->limit = ctl->limit; if (child) { - qdisc_tree_flush_backlog(q->qdisc); + qdisc_purge_queue(q->qdisc); old_child = q->qdisc; q->qdisc = child; } @@ -321,7 +321,7 @@ unlock_out: static inline void red_adaptative_timer(struct timer_list *t) { - struct red_sched_data *q = from_timer(q, t, adapt_timer); + struct red_sched_data *q = timer_container_of(q, t, adapt_timer); struct Qdisc *sch = q->sch; spinlock_t *root_lock; diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c index b912ad99aa15..96eb2f122973 100644 --- a/net/sched/sch_sfq.c +++ b/net/sched/sch_sfq.c @@ -310,7 +310,10 @@ drop: /* It is difficult to believe, but ALL THE SLOTS HAVE LENGTH 1. 
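[Editor's note] For context on the sch_sfq change that continues just below: q->tail references a circular, singly linked ring of active slots, and the old code left q->tail->next pointing at the slot it had just emptied when that slot was the ring's only member. A tiny sketch of the corrected removal with illustrative names (ring[], tail, EMPTY), not the sfq identifiers:

        /* Drop the element after 'tail' from a circular singly linked ring. */
        victim = ring[tail].next;
        if (ring[victim].next == victim)
                tail = EMPTY;                   /* victim was the only member */
        else
                ring[tail].next = ring[victim].next;
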
*/ x = q->tail->next; slot = &q->slots[x]; - q->tail->next = slot->next; + if (slot->next == x) + q->tail = NULL; /* no more active slots */ + else + q->tail->next = slot->next; q->ht[slot->hash] = SFQ_EMPTY_SLOT; goto drop; } @@ -597,7 +600,7 @@ drop: static void sfq_perturbation(struct timer_list *t) { - struct sfq_sched_data *q = from_timer(q, t, perturb_timer); + struct sfq_sched_data *q = timer_container_of(q, t, perturb_timer); struct Qdisc *sch = q->sch; spinlock_t *root_lock; siphash_key_t nkey; @@ -653,6 +656,14 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt, NL_SET_ERR_MSG_MOD(extack, "invalid quantum"); return -EINVAL; } + + if (ctl->perturb_period < 0 || + ctl->perturb_period > INT_MAX / HZ) { + NL_SET_ERR_MSG_MOD(extack, "invalid perturb period"); + return -EINVAL; + } + perturb_period = ctl->perturb_period * HZ; + if (ctl_v1 && !red_check_params(ctl_v1->qth_min, ctl_v1->qth_max, ctl_v1->Wlog, ctl_v1->Scell_log, NULL)) return -EINVAL; @@ -669,14 +680,12 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt, headdrop = q->headdrop; maxdepth = q->maxdepth; maxflows = q->maxflows; - perturb_period = q->perturb_period; quantum = q->quantum; flags = q->flags; /* update and validate configuration */ if (ctl->quantum) quantum = ctl->quantum; - perturb_period = ctl->perturb_period * HZ; if (ctl->flows) maxflows = min_t(u32, ctl->flows, SFQ_MAX_FLOWS); if (ctl->divisor) { diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c index 14021b812329..2b14c81a87e5 100644 --- a/net/sched/sch_taprio.c +++ b/net/sched/sch_taprio.c @@ -1328,13 +1328,15 @@ static int taprio_dev_notifier(struct notifier_block *nb, unsigned long event, stab = rtnl_dereference(q->root->stab); - oper = rtnl_dereference(q->oper_sched); + rcu_read_lock(); + oper = rcu_dereference(q->oper_sched); if (oper) taprio_update_queue_max_sdu(q, oper, stab); - admin = rtnl_dereference(q->admin_sched); + admin = rcu_dereference(q->admin_sched); if (admin) taprio_update_queue_max_sdu(q, admin, stab); + rcu_read_unlock(); break; } diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c index dc26b22d53c7..4c977f049670 100644 --- a/net/sched/sch_tbf.c +++ b/net/sched/sch_tbf.c @@ -452,7 +452,7 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt, sch_tree_lock(sch); if (child) { - qdisc_tree_flush_backlog(q->qdisc); + qdisc_purge_queue(q->qdisc); old = q->qdisc; q->qdisc = child; } diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index a9ed2ccab1bd..3336dcfb4515 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c @@ -261,9 +261,10 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *t) skb_set_inner_ipproto(skb, IPPROTO_SCTP); label = ip6_make_flowlabel(sock_net(sk), skb, fl6->flowlabel, true, fl6); - return udp_tunnel6_xmit_skb(dst, sk, skb, NULL, &fl6->saddr, - &fl6->daddr, tclass, ip6_dst_hoplimit(dst), - label, sctp_sk(sk)->udp_port, t->encap_port, false); + udp_tunnel6_xmit_skb(dst, sk, skb, NULL, &fl6->saddr, &fl6->daddr, + tclass, ip6_dst_hoplimit(dst), label, + sctp_sk(sk)->udp_port, t->encap_port, false, 0); + return 0; } /* Returns the dst cache entry for the given source and destination ip diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index 8c3b80c4d40b..a5ccada55f2b 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c @@ -631,7 +631,7 @@ static void sctp_v4_ecn_capable(struct sock *sk) static void sctp_addr_wq_timeout_handler(struct timer_list *t) { - struct net *net = from_timer(net, t, sctp.addr_wq_timer); + struct net *net = timer_container_of(net, 
t, sctp.addr_wq_timer); struct sctp_sockaddr_entry *addrw, *temp; struct sctp_sock *sp; @@ -1103,7 +1103,8 @@ static inline int sctp_v4_xmit(struct sk_buff *skb, struct sctp_transport *t) skb_set_inner_ipproto(skb, IPPROTO_SCTP); udp_tunnel_xmit_skb(dst_rtable(dst), sk, skb, fl4->saddr, fl4->daddr, dscp, ip4_dst_hoplimit(dst), df, - sctp_sk(sk)->udp_port, t->encap_port, false, false); + sctp_sk(sk)->udp_port, t->encap_port, false, false, + 0); return 0; } diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index 3aa5da5e3bbd..424f10a6fdba 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c @@ -231,7 +231,7 @@ nomem: void sctp_generate_t3_rtx_event(struct timer_list *t) { struct sctp_transport *transport = - from_timer(transport, t, T3_rtx_timer); + timer_container_of(transport, t, T3_rtx_timer); struct sctp_association *asoc = transport->asoc; struct sock *sk = asoc->base.sk; struct net *net = sock_net(sk); @@ -308,7 +308,8 @@ out_unlock: static void sctp_generate_t1_cookie_event(struct timer_list *t) { struct sctp_association *asoc = - from_timer(asoc, t, timers[SCTP_EVENT_TIMEOUT_T1_COOKIE]); + timer_container_of(asoc, t, + timers[SCTP_EVENT_TIMEOUT_T1_COOKIE]); sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T1_COOKIE); } @@ -316,7 +317,8 @@ static void sctp_generate_t1_cookie_event(struct timer_list *t) static void sctp_generate_t1_init_event(struct timer_list *t) { struct sctp_association *asoc = - from_timer(asoc, t, timers[SCTP_EVENT_TIMEOUT_T1_INIT]); + timer_container_of(asoc, t, + timers[SCTP_EVENT_TIMEOUT_T1_INIT]); sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T1_INIT); } @@ -324,7 +326,8 @@ static void sctp_generate_t1_init_event(struct timer_list *t) static void sctp_generate_t2_shutdown_event(struct timer_list *t) { struct sctp_association *asoc = - from_timer(asoc, t, timers[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN]); + timer_container_of(asoc, t, + timers[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN]); sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T2_SHUTDOWN); } @@ -332,7 +335,7 @@ static void sctp_generate_t2_shutdown_event(struct timer_list *t) static void sctp_generate_t4_rto_event(struct timer_list *t) { struct sctp_association *asoc = - from_timer(asoc, t, timers[SCTP_EVENT_TIMEOUT_T4_RTO]); + timer_container_of(asoc, t, timers[SCTP_EVENT_TIMEOUT_T4_RTO]); sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T4_RTO); } @@ -340,8 +343,8 @@ static void sctp_generate_t4_rto_event(struct timer_list *t) static void sctp_generate_t5_shutdown_guard_event(struct timer_list *t) { struct sctp_association *asoc = - from_timer(asoc, t, - timers[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]); + timer_container_of(asoc, t, + timers[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]); sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD); @@ -351,7 +354,8 @@ static void sctp_generate_t5_shutdown_guard_event(struct timer_list *t) static void sctp_generate_autoclose_event(struct timer_list *t) { struct sctp_association *asoc = - from_timer(asoc, t, timers[SCTP_EVENT_TIMEOUT_AUTOCLOSE]); + timer_container_of(asoc, t, + timers[SCTP_EVENT_TIMEOUT_AUTOCLOSE]); sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_AUTOCLOSE); } @@ -361,7 +365,8 @@ static void sctp_generate_autoclose_event(struct timer_list *t) */ void sctp_generate_heartbeat_event(struct timer_list *t) { - struct sctp_transport *transport = from_timer(transport, t, hb_timer); + struct sctp_transport *transport = timer_container_of(transport, t, + hb_timer); struct sctp_association *asoc = 
transport->asoc; struct sock *sk = asoc->base.sk; struct net *net = sock_net(sk); @@ -407,7 +412,7 @@ out_unlock: void sctp_generate_proto_unreach_event(struct timer_list *t) { struct sctp_transport *transport = - from_timer(transport, t, proto_unreach_timer); + timer_container_of(transport, t, proto_unreach_timer); struct sctp_association *asoc = transport->asoc; struct sock *sk = asoc->base.sk; struct net *net = sock_net(sk); @@ -442,7 +447,7 @@ out_unlock: void sctp_generate_reconf_event(struct timer_list *t) { struct sctp_transport *transport = - from_timer(transport, t, reconf_timer); + timer_container_of(transport, t, reconf_timer); struct sctp_association *asoc = transport->asoc; struct sock *sk = asoc->base.sk; struct net *net = sock_net(sk); @@ -478,7 +483,8 @@ out_unlock: /* Handle the timeout of the probe timer. */ void sctp_generate_probe_event(struct timer_list *t) { - struct sctp_transport *transport = from_timer(transport, t, probe_timer); + struct sctp_transport *transport = timer_container_of(transport, t, + probe_timer); struct sctp_association *asoc = transport->asoc; struct sock *sk = asoc->base.sk; struct net *net = sock_net(sk); @@ -511,7 +517,7 @@ out_unlock: static void sctp_generate_sack_event(struct timer_list *t) { struct sctp_association *asoc = - from_timer(asoc, t, timers[SCTP_EVENT_TIMEOUT_SACK]); + timer_container_of(asoc, t, timers[SCTP_EVENT_TIMEOUT_SACK]); sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_SACK); } diff --git a/net/socket.c b/net/socket.c index 9a0e720f0859..2cab805943c0 100644 --- a/net/socket.c +++ b/net/socket.c @@ -843,6 +843,52 @@ static void put_ts_pktinfo(struct msghdr *msg, struct sk_buff *skb, sizeof(ts_pktinfo), &ts_pktinfo); } +bool skb_has_tx_timestamp(struct sk_buff *skb, const struct sock *sk) +{ + const struct sock_exterr_skb *serr = SKB_EXT_ERR(skb); + u32 tsflags = READ_ONCE(sk->sk_tsflags); + + if (serr->ee.ee_errno != ENOMSG || + serr->ee.ee_origin != SO_EE_ORIGIN_TIMESTAMPING) + return false; + + /* software time stamp available and wanted */ + if ((tsflags & SOF_TIMESTAMPING_SOFTWARE) && skb->tstamp) + return true; + /* hardware time stamps available and wanted */ + return (tsflags & SOF_TIMESTAMPING_RAW_HARDWARE) && + skb_hwtstamps(skb)->hwtstamp; +} + +int skb_get_tx_timestamp(struct sk_buff *skb, struct sock *sk, + struct timespec64 *ts) +{ + u32 tsflags = READ_ONCE(sk->sk_tsflags); + ktime_t hwtstamp; + int if_index = 0; + + if ((tsflags & SOF_TIMESTAMPING_SOFTWARE) && + ktime_to_timespec64_cond(skb->tstamp, ts)) + return SOF_TIMESTAMPING_TX_SOFTWARE; + + if (!(tsflags & SOF_TIMESTAMPING_RAW_HARDWARE) || + skb_is_swtx_tstamp(skb, false)) + return -ENOENT; + + if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP_NETDEV) + hwtstamp = get_timestamp(sk, skb, &if_index); + else + hwtstamp = skb_hwtstamps(skb)->hwtstamp; + + if (tsflags & SOF_TIMESTAMPING_BIND_PHC) + hwtstamp = ptp_convert_timestamp(&hwtstamp, + READ_ONCE(sk->sk_bind_phc)); + if (!ktime_to_timespec64_cond(hwtstamp, ts)) + return -ENOENT; + + return SOF_TIMESTAMPING_TX_HARDWARE; +} + /* * called from sock_recv_timestamp() if sock_flag(sk, SOCK_RCVTSTAMP) */ diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c index 369310909fc9..0fa244f16876 100644 --- a/net/sunrpc/auth_gss/auth_gss.c +++ b/net/sunrpc/auth_gss/auth_gss.c @@ -1545,6 +1545,7 @@ static int gss_marshal(struct rpc_task *task, struct xdr_stream *xdr) struct kvec iov; struct xdr_buf verf_buf; int status; + u32 seqno; /* Credential */ @@ -1556,15 +1557,16 @@ static int 
gss_marshal(struct rpc_task *task, struct xdr_stream *xdr) cred_len = p++; spin_lock(&ctx->gc_seq_lock); - req->rq_seqno = (ctx->gc_seq < MAXSEQ) ? ctx->gc_seq++ : MAXSEQ; + seqno = (ctx->gc_seq < MAXSEQ) ? ctx->gc_seq++ : MAXSEQ; + xprt_rqst_add_seqno(req, seqno); spin_unlock(&ctx->gc_seq_lock); - if (req->rq_seqno == MAXSEQ) + if (*req->rq_seqnos == MAXSEQ) goto expired; trace_rpcgss_seqno(task); *p++ = cpu_to_be32(RPC_GSS_VERSION); *p++ = cpu_to_be32(ctx->gc_proc); - *p++ = cpu_to_be32(req->rq_seqno); + *p++ = cpu_to_be32(*req->rq_seqnos); *p++ = cpu_to_be32(gss_cred->gc_service); p = xdr_encode_netobj(p, &ctx->gc_wire_ctx); *cred_len = cpu_to_be32((p - (cred_len + 1)) << 2); @@ -1678,17 +1680,31 @@ gss_refresh_null(struct rpc_task *task) return 0; } +static u32 +gss_validate_seqno_mic(struct gss_cl_ctx *ctx, u32 seqno, __be32 *seq, __be32 *p, u32 len) +{ + struct kvec iov; + struct xdr_buf verf_buf; + struct xdr_netobj mic; + + *seq = cpu_to_be32(seqno); + iov.iov_base = seq; + iov.iov_len = 4; + xdr_buf_from_iov(&iov, &verf_buf); + mic.data = (u8 *)p; + mic.len = len; + return gss_verify_mic(ctx->gc_gss_ctx, &verf_buf, &mic); +} + static int gss_validate(struct rpc_task *task, struct xdr_stream *xdr) { struct rpc_cred *cred = task->tk_rqstp->rq_cred; struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred); __be32 *p, *seq = NULL; - struct kvec iov; - struct xdr_buf verf_buf; - struct xdr_netobj mic; u32 len, maj_stat; int status; + int i = 1; /* don't recheck the first item */ p = xdr_inline_decode(xdr, 2 * sizeof(*p)); if (!p) @@ -1705,13 +1721,10 @@ gss_validate(struct rpc_task *task, struct xdr_stream *xdr) seq = kmalloc(4, GFP_KERNEL); if (!seq) goto validate_failed; - *seq = cpu_to_be32(task->tk_rqstp->rq_seqno); - iov.iov_base = seq; - iov.iov_len = 4; - xdr_buf_from_iov(&iov, &verf_buf); - mic.data = (u8 *)p; - mic.len = len; - maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &verf_buf, &mic); + maj_stat = gss_validate_seqno_mic(ctx, task->tk_rqstp->rq_seqnos[0], seq, p, len); + /* RFC 2203 5.3.3.1 - compute the checksum of each sequence number in the cache */ + while (unlikely(maj_stat == GSS_S_BAD_SIG && i < task->tk_rqstp->rq_seqno_count)) + maj_stat = gss_validate_seqno_mic(ctx, task->tk_rqstp->rq_seqnos[i], seq, p, len); if (maj_stat == GSS_S_CONTEXT_EXPIRED) clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags); if (maj_stat) @@ -1750,7 +1763,7 @@ gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx, if (!p) goto wrap_failed; integ_len = p++; - *p = cpu_to_be32(rqstp->rq_seqno); + *p = cpu_to_be32(*rqstp->rq_seqnos); if (rpcauth_wrap_req_encode(task, xdr)) goto wrap_failed; @@ -1847,7 +1860,7 @@ gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx, if (!p) goto wrap_failed; opaque_len = p++; - *p = cpu_to_be32(rqstp->rq_seqno); + *p = cpu_to_be32(*rqstp->rq_seqnos); if (rpcauth_wrap_req_encode(task, xdr)) goto wrap_failed; @@ -2001,7 +2014,7 @@ gss_unwrap_resp_integ(struct rpc_task *task, struct rpc_cred *cred, offset = rcv_buf->len - xdr_stream_remaining(xdr); if (xdr_stream_decode_u32(xdr, &seqno)) goto unwrap_failed; - if (seqno != rqstp->rq_seqno) + if (seqno != *rqstp->rq_seqnos) goto bad_seqno; if (xdr_buf_subsegment(rcv_buf, &gss_data, offset, len)) goto unwrap_failed; @@ -2045,7 +2058,7 @@ unwrap_failed: trace_rpcgss_unwrap_failed(task); goto out; bad_seqno: - trace_rpcgss_bad_seqno(task, rqstp->rq_seqno, seqno); + trace_rpcgss_bad_seqno(task, *rqstp->rq_seqnos, seqno); goto out; bad_mic: trace_rpcgss_verify_mic(task, maj_stat); @@ -2077,7 +2090,7 @@ 
gss_unwrap_resp_priv(struct rpc_task *task, struct rpc_cred *cred, if (maj_stat != GSS_S_COMPLETE) goto bad_unwrap; /* gss_unwrap decrypted the sequence number */ - if (be32_to_cpup(p++) != rqstp->rq_seqno) + if (be32_to_cpup(p++) != *rqstp->rq_seqnos) goto bad_seqno; /* gss_unwrap redacts the opaque blob from the head iovec. @@ -2093,7 +2106,7 @@ unwrap_failed: trace_rpcgss_unwrap_failed(task); return -EIO; bad_seqno: - trace_rpcgss_bad_seqno(task, rqstp->rq_seqno, be32_to_cpup(--p)); + trace_rpcgss_bad_seqno(task, *rqstp->rq_seqnos, be32_to_cpup(--p)); return -EIO; bad_unwrap: trace_rpcgss_unwrap(task, maj_stat); @@ -2118,14 +2131,14 @@ gss_xmit_need_reencode(struct rpc_task *task) if (!ctx) goto out; - if (gss_seq_is_newer(req->rq_seqno, READ_ONCE(ctx->gc_seq))) + if (gss_seq_is_newer(*req->rq_seqnos, READ_ONCE(ctx->gc_seq))) goto out_ctx; seq_xmit = READ_ONCE(ctx->gc_seq_xmit); - while (gss_seq_is_newer(req->rq_seqno, seq_xmit)) { + while (gss_seq_is_newer(*req->rq_seqnos, seq_xmit)) { u32 tmp = seq_xmit; - seq_xmit = cmpxchg(&ctx->gc_seq_xmit, tmp, req->rq_seqno); + seq_xmit = cmpxchg(&ctx->gc_seq_xmit, tmp, *req->rq_seqnos); if (seq_xmit == tmp) { ret = false; goto out_ctx; @@ -2134,7 +2147,7 @@ gss_xmit_need_reencode(struct rpc_task *task) win = ctx->gc_win; if (win > 0) - ret = !gss_seq_is_newer(req->rq_seqno, seq_xmit - win); + ret = !gss_seq_is_newer(*req->rq_seqnos, seq_xmit - win); out_ctx: gss_put_ctx(ctx); diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index 6f75862d9782..21426c3049d3 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ -2771,8 +2771,13 @@ out_verifier: case -EPROTONOSUPPORT: goto out_err; case -EACCES: - /* Re-encode with a fresh cred */ - fallthrough; + /* possible RPCSEC_GSS out-of-sequence event (RFC2203), + * reset recv state and keep waiting, don't retransmit + */ + task->tk_rqstp->rq_reply_bytes_recvd = 0; + task->tk_status = xprt_request_enqueue_receive(task); + task->tk_action = call_transmit_status; + return -EBADMSG; default: goto out_garbage; } diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c index cb14d6ddac6c..8b1837228799 100644 --- a/net/sunrpc/svc_xprt.c +++ b/net/sunrpc/svc_xprt.c @@ -922,7 +922,7 @@ void svc_send(struct svc_rqst *rqstp) */ static void svc_age_temp_xprts(struct timer_list *t) { - struct svc_serv *serv = from_timer(serv, t, sv_temptimer); + struct svc_serv *serv = timer_container_of(serv, t, sv_temptimer); struct svc_xprt *xprt; struct list_head *le, *next; diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 0eab15465511..1023361845f9 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -854,7 +854,7 @@ xprt_schedule_autodisconnect(struct rpc_xprt *xprt) static void xprt_init_autodisconnect(struct timer_list *t) { - struct rpc_xprt *xprt = from_timer(xprt, t, timer); + struct rpc_xprt *xprt = timer_container_of(xprt, t, timer); if (!RB_EMPTY_ROOT(&xprt->recv_queue)) return; @@ -1365,7 +1365,7 @@ xprt_request_enqueue_transmit(struct rpc_task *task) INIT_LIST_HEAD(&req->rq_xmit2); goto out; } - } else if (!req->rq_seqno) { + } else if (req->rq_seqno_count == 0) { list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) { if (pos->rq_task->tk_owner != task->tk_owner) continue; @@ -1898,6 +1898,7 @@ xprt_request_init(struct rpc_task *task) req->rq_snd_buf.bvec = NULL; req->rq_rcv_buf.bvec = NULL; req->rq_release_snd_buf = NULL; + req->rq_seqno_count = 0; xprt_init_majortimeo(task, req, task->tk_client->cl_timeout); trace_xprt_reserve(req); diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 
83cc095846d3..04ff66758fc3 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -2726,20 +2726,14 @@ static void xs_tcp_tls_setup_socket(struct work_struct *work) if (status) goto out_close; xprt_release_write(lower_xprt, NULL); - trace_rpc_socket_connect(upper_xprt, upper_transport->sock, 0); - if (!xprt_test_and_set_connected(upper_xprt)) { - upper_xprt->connect_cookie++; - clear_bit(XPRT_SOCK_CONNECTING, &upper_transport->sock_state); - xprt_clear_connecting(upper_xprt); - - upper_xprt->stat.connect_count++; - upper_xprt->stat.connect_time += (long)jiffies - - upper_xprt->stat.connect_start; - xs_run_error_worker(upper_transport, XPRT_SOCK_WAKE_PENDING); - } rpc_shutdown_client(lower_clnt); + /* Check for ingress data that arrived before the socket's + * ->data_ready callback was set up. + */ + xs_poll_check_readable(upper_transport); + out_unlock: current_restore_flags(pflags, PF_MEMALLOC); upper_transport->clnt = NULL; diff --git a/net/tipc/crypto.c b/net/tipc/crypto.c index f4cfe88670f5..ea5bb131ebd0 100644 --- a/net/tipc/crypto.c +++ b/net/tipc/crypto.c @@ -818,7 +818,11 @@ static int tipc_aead_encrypt(struct tipc_aead *aead, struct sk_buff *skb, } /* Get net to avoid freed tipc_crypto when delete namespace */ - get_net(aead->crypto->net); + if (!maybe_get_net(aead->crypto->net)) { + tipc_bearer_put(b); + rc = -ENODEV; + goto exit; + } /* Now, do encrypt */ rc = crypto_aead_encrypt(req); diff --git a/net/tipc/discover.c b/net/tipc/discover.c index 685389d4b245..775fd4f3f072 100644 --- a/net/tipc/discover.c +++ b/net/tipc/discover.c @@ -292,7 +292,7 @@ void tipc_disc_remove_dest(struct tipc_discoverer *d) */ static void tipc_disc_timeout(struct timer_list *t) { - struct tipc_discoverer *d = from_timer(d, t, timer); + struct tipc_discoverer *d = timer_container_of(d, t, timer); struct tipc_net *tn = tipc_net(d->net); struct tipc_media_addr maddr; struct sk_buff *skb = NULL; diff --git a/net/tipc/monitor.c b/net/tipc/monitor.c index b45c5b91bc7a..572b79bf76ce 100644 --- a/net/tipc/monitor.c +++ b/net/tipc/monitor.c @@ -630,7 +630,7 @@ void tipc_mon_get_state(struct net *net, u32 addr, static void mon_timeout(struct timer_list *t) { - struct tipc_monitor *mon = from_timer(mon, t, timer); + struct tipc_monitor *mon = timer_container_of(mon, t, timer); struct tipc_peer *self; int best_member_cnt = dom_size(mon->peer_cnt) - 1; diff --git a/net/tipc/node.c b/net/tipc/node.c index cb43f2016a70..a07fb073368c 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c @@ -800,7 +800,7 @@ static bool tipc_node_cleanup(struct tipc_node *peer) */ static void tipc_node_timeout(struct timer_list *t) { - struct tipc_node *n = from_timer(n, t, timer); + struct tipc_node *n = timer_container_of(n, t, timer); struct tipc_link_entry *le; struct sk_buff_head xmitq; int remains = n->link_cnt; diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 65dcbb54f55d..7c61d47ea208 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -2862,7 +2862,7 @@ static void tipc_sk_retry_connect(struct sock *sk, struct sk_buff_head *list) static void tipc_sk_timeout(struct timer_list *t) { - struct sock *sk = from_timer(sk, t, sk_timer); + struct sock *sk = timer_container_of(sk, t, sk_timer); struct tipc_sock *tsk = tipc_sk(sk); u32 pnode = tsk_peer_node(tsk); struct sk_buff_head list; diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c index 621addab2834..f8490d94e323 100644 --- a/net/tipc/subscr.c +++ b/net/tipc/subscr.c @@ -105,7 +105,7 @@ void tipc_sub_report_overlap(struct tipc_subscription *sub, static void 
tipc_sub_timeout(struct timer_list *t) { - struct tipc_subscription *sub = from_timer(sub, t, timer); + struct tipc_subscription *sub = timer_container_of(sub, t, timer); spin_lock(&sub->lock); tipc_sub_send_event(sub, NULL, TIPC_SUBSCR_TIMEOUT); diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c index 108a4cc2e001..b85ab0fb3b8c 100644 --- a/net/tipc/udp_media.c +++ b/net/tipc/udp_media.c @@ -172,7 +172,7 @@ static int tipc_udp_xmit(struct net *net, struct sk_buff *skb, struct udp_media_addr *dst, struct dst_cache *cache) { struct dst_entry *ndst; - int ttl, err = 0; + int ttl, err; local_bh_disable(); ndst = dst_cache_get(cache); @@ -197,7 +197,7 @@ static int tipc_udp_xmit(struct net *net, struct sk_buff *skb, ttl = ip4_dst_hoplimit(&rt->dst); udp_tunnel_xmit_skb(rt, ub->ubsock->sk, skb, src->ipv4.s_addr, dst->ipv4.s_addr, 0, ttl, 0, src->port, - dst->port, false, true); + dst->port, false, true, 0); #if IS_ENABLED(CONFIG_IPV6) } else { if (!ndst) { @@ -217,13 +217,13 @@ static int tipc_udp_xmit(struct net *net, struct sk_buff *skb, dst_cache_set_ip6(cache, ndst, &fl6.saddr); } ttl = ip6_dst_hoplimit(ndst); - err = udp_tunnel6_xmit_skb(ndst, ub->ubsock->sk, skb, NULL, - &src->ipv6, &dst->ipv6, 0, ttl, 0, - src->port, dst->port, false); + udp_tunnel6_xmit_skb(ndst, ub->ubsock->sk, skb, NULL, + &src->ipv6, &dst->ipv6, 0, ttl, 0, + src->port, dst->port, false, 0); #endif } local_bh_enable(); - return err; + return 0; tx_error: local_bh_enable(); @@ -489,7 +489,7 @@ int tipc_udp_nl_dump_remoteip(struct sk_buff *skb, struct netlink_callback *cb) rtnl_lock(); b = tipc_bearer_find(net, bname); - if (!b) { + if (!b || b->bcast_addr.media_id != TIPC_MEDIA_TYPE_UDP) { rtnl_unlock(); return -EINVAL; } @@ -500,7 +500,7 @@ int tipc_udp_nl_dump_remoteip(struct sk_buff *skb, struct netlink_callback *cb) rtnl_lock(); b = rtnl_dereference(tn->bearer_list[bid]); - if (!b) { + if (!b || b->bcast_addr.media_id != TIPC_MEDIA_TYPE_UDP) { rtnl_unlock(); return -EINVAL; } diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index 914d4e1516a3..fc88e34b7f33 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -908,6 +908,13 @@ more_data: &msg_redir, send, flags); lock_sock(sk); if (err < 0) { + /* Regardless of whether the data represented by + * msg_redir is sent successfully, we have already + * uncharged it via sk_msg_return_zero(). The + * msg->sg.size represents the remaining unprocessed + * data, which needs to be uncharged here. 
+ */ + sk_mem_uncharge(sk, msg->sg.size); *copied -= sk_msg_free_nocharge(sk, &msg_redir); msg->sg.size = 0; } @@ -1120,9 +1127,13 @@ alloc_encrypted: num_async++; else if (ret == -ENOMEM) goto wait_for_memory; - else if (ctx->open_rec && ret == -ENOSPC) + else if (ctx->open_rec && ret == -ENOSPC) { + if (msg_pl->cork_bytes) { + ret = 0; + goto send_end; + } goto rollback_iter; - else if (ret != -EAGAIN) + } else if (ret != -EAGAIN) goto send_end; } continue; diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 2e2e9997a68e..22e170fb5dda 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -1971,7 +1971,8 @@ static void unix_maybe_add_creds(struct sk_buff *skb, const struct sock *sk, if (UNIXCB(skb).pid) return; - if (unix_may_passcred(sk) || unix_may_passcred(other)) { + if (unix_may_passcred(sk) || unix_may_passcred(other) || + !other->sk_socket) { UNIXCB(skb).pid = get_pid(task_tgid(current)); current_uid_gid(&UNIXCB(skb).uid, &UNIXCB(skb).gid); } diff --git a/net/wireless/core.c b/net/wireless/core.c index dcce326fdb8c..f3cd70757ef2 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -995,6 +995,24 @@ int wiphy_register(struct wiphy *wiphy) wiphy->max_num_akm_suites > CFG80211_MAX_NUM_AKM_SUITES) return -EINVAL; + /* Allocate radio configuration space for multi-radio wiphy */ + if (wiphy->n_radio > 0) { + int idx; + + wiphy->radio_cfg = kcalloc(wiphy->n_radio, + sizeof(*wiphy->radio_cfg), + GFP_KERNEL); + if (!wiphy->radio_cfg) + return -ENOMEM; + /* + * Initialize wiphy radio parameters to IEEE 802.11 + * MIB default values. RTS threshold is disabled by + * default with the special -1 value. + */ + for (idx = 0; idx < wiphy->n_radio; idx++) + wiphy->radio_cfg[idx].rts_threshold = (u32)-1; + } + /* check and set up bitrates */ ieee80211_set_bitrate_flags(wiphy); @@ -1222,6 +1240,7 @@ void cfg80211_dev_free(struct cfg80211_registered_device *rdev) void wiphy_free(struct wiphy *wiphy) { + kfree(wiphy->radio_cfg); put_device(&wiphy->dev); } EXPORT_SYMBOL(wiphy_free); @@ -1709,7 +1728,7 @@ EXPORT_SYMBOL_GPL(wiphy_work_flush); void wiphy_delayed_work_timer(struct timer_list *t) { - struct wiphy_delayed_work *dwork = from_timer(dwork, t, timer); + struct wiphy_delayed_work *dwork = timer_container_of(dwork, t, timer); wiphy_work_queue(dwork->wiphy, &dwork->work); } diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c index 05d44a443518..29e1ce8aff42 100644 --- a/net/wireless/mlme.c +++ b/net/wireless/mlme.c @@ -1331,7 +1331,8 @@ void cfg80211_mlo_reconf_add_done(struct net_device *dev, lockdep_assert_wiphy(wiphy); trace_cfg80211_mlo_reconf_add_done(dev, data->added_links, - data->buf, data->len); + data->buf, data->len, + data->driver_initiated); if (WARN_ON(!wdev->valid_links)) return; @@ -1361,11 +1362,16 @@ void cfg80211_mlo_reconf_add_done(struct net_device *dev, wdev->links[link_id].client.current_bss = bss_from_pub(bss); + if (data->driver_initiated) + cfg80211_hold_bss(bss_from_pub(bss)); + memcpy(wdev->links[link_id].addr, data->links[link_id].addr, ETH_ALEN); } else { - cfg80211_unhold_bss(bss_from_pub(bss)); + if (!data->driver_initiated) + cfg80211_unhold_bss(bss_from_pub(bss)); + cfg80211_put_bss(wiphy, bss); } } diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index fd5f79266471..70ca74a75f22 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -854,6 +854,7 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = { [NL80211_ATTR_MLO_RECONF_REM_LINKS] = { .type = NLA_U16 }, [NL80211_ATTR_EPCS] = { .type = NLA_FLAG 
}, [NL80211_ATTR_ASSOC_MLD_EXT_CAPA_OPS] = { .type = NLA_U16 }, + [NL80211_ATTR_WIPHY_RADIO_INDEX] = { .type = NLA_U8 }, }; /* policy for the key attributes */ @@ -1583,7 +1584,7 @@ nl80211_parse_connkeys(struct cfg80211_registered_device *rdev, return result; error: - kfree(result); + kfree_sensitive(result); return ERR_PTR(err); } @@ -2446,6 +2447,7 @@ fail: static int nl80211_put_radio(struct wiphy *wiphy, struct sk_buff *msg, int idx) { const struct wiphy_radio *r = &wiphy->radio[idx]; + const struct wiphy_radio_cfg *rcfg = &wiphy->radio_cfg[idx]; struct nlattr *radio, *freq; int i; @@ -2456,6 +2458,11 @@ static int nl80211_put_radio(struct wiphy *wiphy, struct sk_buff *msg, int idx) if (nla_put_u32(msg, NL80211_WIPHY_RADIO_ATTR_INDEX, idx)) goto nla_put_failure; + if (rcfg->rts_threshold && + nla_put_u32(msg, NL80211_WIPHY_RADIO_ATTR_RTS_THRESHOLD, + rcfg->rts_threshold)) + goto nla_put_failure; + if (r->antenna_mask && nla_put_u32(msg, NL80211_WIPHY_RADIO_ATTR_ANTENNA_MASK, r->antenna_mask)) @@ -2639,7 +2646,7 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev, u32 tx_ant = 0, rx_ant = 0; int res; - res = rdev_get_antenna(rdev, &tx_ant, &rx_ant); + res = rdev_get_antenna(rdev, -1, &tx_ant, &rx_ant); if (!res) { if (nla_put_u32(msg, NL80211_ATTR_WIPHY_ANTENNA_TX, @@ -3608,6 +3615,33 @@ static int nl80211_set_channel(struct sk_buff *skb, struct genl_info *info) return __nl80211_set_channel(rdev, netdev, info, link_id); } +static int nl80211_set_wiphy_radio(struct genl_info *info, + struct cfg80211_registered_device *rdev, + int radio_idx) +{ + u32 rts_threshold = 0, old_rts, changed = 0; + int result; + + if (!rdev->ops->set_wiphy_params) + return -EOPNOTSUPP; + + if (info->attrs[NL80211_ATTR_WIPHY_RTS_THRESHOLD]) { + rts_threshold = nla_get_u32( + info->attrs[NL80211_ATTR_WIPHY_RTS_THRESHOLD]); + changed |= WIPHY_PARAM_RTS_THRESHOLD; + } + + old_rts = rdev->wiphy.radio_cfg[radio_idx].rts_threshold; + + rdev->wiphy.radio_cfg[radio_idx].rts_threshold = rts_threshold; + + result = rdev_set_wiphy_params(rdev, radio_idx, changed); + if (result) + rdev->wiphy.radio_cfg[radio_idx].rts_threshold = old_rts; + + return 0; +} + static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = NULL; @@ -3620,6 +3654,7 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info) u32 frag_threshold = 0, rts_threshold = 0; u8 coverage_class = 0; u32 txq_limit = 0, txq_memory_limit = 0, txq_quantum = 0; + int radio_idx = -1; rtnl_lock(); /* @@ -3670,6 +3705,19 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info) if (result) return result; + if (info->attrs[NL80211_ATTR_WIPHY_RADIO_INDEX]) { + /* Radio idx is not expected for non-multi radio wiphy */ + if (rdev->wiphy.n_radio <= 0) + return -EINVAL; + + radio_idx = nla_get_u8( + info->attrs[NL80211_ATTR_WIPHY_RADIO_INDEX]); + if (radio_idx >= rdev->wiphy.n_radio) + return -EINVAL; + + return nl80211_set_wiphy_radio(info, rdev, radio_idx); + } + if (info->attrs[NL80211_ATTR_WIPHY_TXQ_PARAMS]) { struct ieee80211_txq_params txq_params; struct nlattr *tb[NL80211_TXQ_ATTR_MAX + 1]; @@ -3759,7 +3807,8 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info) mbm = nla_get_u32(info->attrs[idx]); } - result = rdev_set_tx_power(rdev, txp_wdev, type, mbm); + result = rdev_set_tx_power(rdev, txp_wdev, radio_idx, type, + mbm); if (result) return result; } @@ -3785,7 +3834,7 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct 
genl_info *info) tx_ant = tx_ant & rdev->wiphy.available_antennas_tx; rx_ant = rx_ant & rdev->wiphy.available_antennas_rx; - result = rdev_set_antenna(rdev, tx_ant, rx_ant); + result = rdev_set_antenna(rdev, radio_idx, tx_ant, rx_ant); if (result) return result; } @@ -3879,16 +3928,30 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info) if (changed) { u8 old_retry_short, old_retry_long; u32 old_frag_threshold, old_rts_threshold; - u8 old_coverage_class; + u8 old_coverage_class, i; u32 old_txq_limit, old_txq_memory_limit, old_txq_quantum; + u32 *old_radio_rts_threshold = NULL; if (!rdev->ops->set_wiphy_params) return -EOPNOTSUPP; + if (rdev->wiphy.n_radio) { + old_radio_rts_threshold = kcalloc(rdev->wiphy.n_radio, + sizeof(u32), + GFP_KERNEL); + if (!old_radio_rts_threshold) + return -ENOMEM; + } + old_retry_short = rdev->wiphy.retry_short; old_retry_long = rdev->wiphy.retry_long; old_frag_threshold = rdev->wiphy.frag_threshold; old_rts_threshold = rdev->wiphy.rts_threshold; + if (old_radio_rts_threshold) { + for (i = 0 ; i < rdev->wiphy.n_radio; i++) + old_radio_rts_threshold[i] = + rdev->wiphy.radio_cfg[i].rts_threshold; + } old_coverage_class = rdev->wiphy.coverage_class; old_txq_limit = rdev->wiphy.txq_limit; old_txq_memory_limit = rdev->wiphy.txq_memory_limit; @@ -3900,8 +3963,13 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info) rdev->wiphy.retry_long = retry_long; if (changed & WIPHY_PARAM_FRAG_THRESHOLD) rdev->wiphy.frag_threshold = frag_threshold; - if (changed & WIPHY_PARAM_RTS_THRESHOLD) + if ((changed & WIPHY_PARAM_RTS_THRESHOLD) && + old_radio_rts_threshold) { rdev->wiphy.rts_threshold = rts_threshold; + for (i = 0 ; i < rdev->wiphy.n_radio; i++) + rdev->wiphy.radio_cfg[i].rts_threshold = + rdev->wiphy.rts_threshold; + } if (changed & WIPHY_PARAM_COVERAGE_CLASS) rdev->wiphy.coverage_class = coverage_class; if (changed & WIPHY_PARAM_TXQ_LIMIT) @@ -3911,18 +3979,26 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info) if (changed & WIPHY_PARAM_TXQ_QUANTUM) rdev->wiphy.txq_quantum = txq_quantum; - result = rdev_set_wiphy_params(rdev, changed); + result = rdev_set_wiphy_params(rdev, radio_idx, changed); if (result) { rdev->wiphy.retry_short = old_retry_short; rdev->wiphy.retry_long = old_retry_long; rdev->wiphy.frag_threshold = old_frag_threshold; rdev->wiphy.rts_threshold = old_rts_threshold; + if (old_radio_rts_threshold) { + for (i = 0 ; i < rdev->wiphy.n_radio; i++) + rdev->wiphy.radio_cfg[i].rts_threshold = + old_radio_rts_threshold[i]; + } rdev->wiphy.coverage_class = old_coverage_class; rdev->wiphy.txq_limit = old_txq_limit; rdev->wiphy.txq_memory_limit = old_txq_memory_limit; rdev->wiphy.txq_quantum = old_txq_quantum; - return result; } + + if (old_rts_threshold) + kfree(old_radio_rts_threshold); + return result; } return 0; @@ -4012,7 +4088,7 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flag if (rdev->ops->get_tx_power && !wdev->valid_links) { int dbm, ret; - ret = rdev_get_tx_power(rdev, wdev, 0, &dbm); + ret = rdev_get_tx_power(rdev, wdev, -1, 0, &dbm); if (ret == 0 && nla_put_u32(msg, NL80211_ATTR_WIPHY_TX_POWER_LEVEL, DBM_TO_MBM(dbm))) @@ -4084,7 +4160,7 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flag if (rdev->ops->get_tx_power) { int dbm, ret; - ret = rdev_get_tx_power(rdev, wdev, link_id, &dbm); + ret = rdev_get_tx_power(rdev, wdev, -1, link_id, &dbm); if (ret == 0 && nla_put_u32(msg, NL80211_ATTR_WIPHY_TX_POWER_LEVEL, 
DBM_TO_MBM(dbm))) @@ -6728,6 +6804,185 @@ static bool nl80211_put_signal(struct sk_buff *msg, u8 mask, s8 *signal, return true; } +static int nl80211_fill_link_station(struct sk_buff *msg, + struct cfg80211_registered_device *rdev, + struct link_station_info *link_sinfo) +{ + struct nlattr *bss_param, *link_sinfoattr; + +#define PUT_LINK_SINFO(attr, memb, type) do { \ + BUILD_BUG_ON(sizeof(type) == sizeof(u64)); \ + if (link_sinfo->filled & BIT_ULL(NL80211_STA_INFO_ ## attr) && \ + nla_put_ ## type(msg, NL80211_STA_INFO_ ## attr, \ + link_sinfo->memb)) \ + goto nla_put_failure; \ + } while (0) +#define PUT_LINK_SINFO_U64(attr, memb) do { \ + if (link_sinfo->filled & BIT_ULL(NL80211_STA_INFO_ ## attr) && \ + nla_put_u64_64bit(msg, NL80211_STA_INFO_ ## attr, \ + link_sinfo->memb, NL80211_STA_INFO_PAD)) \ + goto nla_put_failure; \ + } while (0) + + link_sinfoattr = nla_nest_start_noflag(msg, NL80211_ATTR_STA_INFO); + if (!link_sinfoattr) + goto nla_put_failure; + + PUT_LINK_SINFO(INACTIVE_TIME, inactive_time, u32); + + if (link_sinfo->filled & (BIT_ULL(NL80211_STA_INFO_RX_BYTES) | + BIT_ULL(NL80211_STA_INFO_RX_BYTES64)) && + nla_put_u32(msg, NL80211_STA_INFO_RX_BYTES, + (u32)link_sinfo->rx_bytes)) + goto nla_put_failure; + + if (link_sinfo->filled & (BIT_ULL(NL80211_STA_INFO_TX_BYTES) | + BIT_ULL(NL80211_STA_INFO_TX_BYTES64)) && + nla_put_u32(msg, NL80211_STA_INFO_TX_BYTES, + (u32)link_sinfo->tx_bytes)) + goto nla_put_failure; + + PUT_LINK_SINFO_U64(RX_BYTES64, rx_bytes); + PUT_LINK_SINFO_U64(TX_BYTES64, tx_bytes); + PUT_LINK_SINFO_U64(RX_DURATION, rx_duration); + PUT_LINK_SINFO_U64(TX_DURATION, tx_duration); + + if (wiphy_ext_feature_isset(&rdev->wiphy, + NL80211_EXT_FEATURE_AIRTIME_FAIRNESS)) + PUT_LINK_SINFO(AIRTIME_WEIGHT, airtime_weight, u16); + + switch (rdev->wiphy.signal_type) { + case CFG80211_SIGNAL_TYPE_MBM: + PUT_LINK_SINFO(SIGNAL, signal, u8); + PUT_LINK_SINFO(SIGNAL_AVG, signal_avg, u8); + break; + default: + break; + } + if (link_sinfo->filled & BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL)) { + if (!nl80211_put_signal(msg, link_sinfo->chains, + link_sinfo->chain_signal, + NL80211_STA_INFO_CHAIN_SIGNAL)) + goto nla_put_failure; + } + if (link_sinfo->filled & BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL_AVG)) { + if (!nl80211_put_signal(msg, link_sinfo->chains, + link_sinfo->chain_signal_avg, + NL80211_STA_INFO_CHAIN_SIGNAL_AVG)) + goto nla_put_failure; + } + if (link_sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_BITRATE)) { + if (!nl80211_put_sta_rate(msg, &link_sinfo->txrate, + NL80211_STA_INFO_TX_BITRATE)) + goto nla_put_failure; + } + if (link_sinfo->filled & BIT_ULL(NL80211_STA_INFO_RX_BITRATE)) { + if (!nl80211_put_sta_rate(msg, &link_sinfo->rxrate, + NL80211_STA_INFO_RX_BITRATE)) + goto nla_put_failure; + } + + PUT_LINK_SINFO(RX_PACKETS, rx_packets, u32); + PUT_LINK_SINFO(TX_PACKETS, tx_packets, u32); + PUT_LINK_SINFO(TX_RETRIES, tx_retries, u32); + PUT_LINK_SINFO(TX_FAILED, tx_failed, u32); + PUT_LINK_SINFO(EXPECTED_THROUGHPUT, expected_throughput, u32); + PUT_LINK_SINFO(BEACON_LOSS, beacon_loss_count, u32); + + if (link_sinfo->filled & BIT_ULL(NL80211_STA_INFO_BSS_PARAM)) { + bss_param = nla_nest_start_noflag(msg, + NL80211_STA_INFO_BSS_PARAM); + if (!bss_param) + goto nla_put_failure; + + if (((link_sinfo->bss_param.flags & + BSS_PARAM_FLAGS_CTS_PROT) && + nla_put_flag(msg, NL80211_STA_BSS_PARAM_CTS_PROT)) || + ((link_sinfo->bss_param.flags & + BSS_PARAM_FLAGS_SHORT_PREAMBLE) && + nla_put_flag(msg, + NL80211_STA_BSS_PARAM_SHORT_PREAMBLE)) || + ((link_sinfo->bss_param.flags & + 
BSS_PARAM_FLAGS_SHORT_SLOT_TIME) && + nla_put_flag(msg, + NL80211_STA_BSS_PARAM_SHORT_SLOT_TIME)) || + nla_put_u8(msg, NL80211_STA_BSS_PARAM_DTIM_PERIOD, + link_sinfo->bss_param.dtim_period) || + nla_put_u16(msg, NL80211_STA_BSS_PARAM_BEACON_INTERVAL, + link_sinfo->bss_param.beacon_interval)) + goto nla_put_failure; + + nla_nest_end(msg, bss_param); + } + + PUT_LINK_SINFO_U64(RX_DROP_MISC, rx_dropped_misc); + PUT_LINK_SINFO_U64(BEACON_RX, rx_beacon); + PUT_LINK_SINFO(BEACON_SIGNAL_AVG, rx_beacon_signal_avg, u8); + PUT_LINK_SINFO(RX_MPDUS, rx_mpdu_count, u32); + PUT_LINK_SINFO(FCS_ERROR_COUNT, fcs_err_count, u32); + if (wiphy_ext_feature_isset(&rdev->wiphy, + NL80211_EXT_FEATURE_ACK_SIGNAL_SUPPORT)) { + PUT_LINK_SINFO(ACK_SIGNAL, ack_signal, u8); + PUT_LINK_SINFO(ACK_SIGNAL_AVG, avg_ack_signal, s8); + } + +#undef PUT_LINK_SINFO +#undef PUT_LINK_SINFO_U64 + + if (link_sinfo->pertid) { + struct nlattr *tidsattr; + int tid; + + tidsattr = nla_nest_start_noflag(msg, + NL80211_STA_INFO_TID_STATS); + if (!tidsattr) + goto nla_put_failure; + + for (tid = 0; tid < IEEE80211_NUM_TIDS + 1; tid++) { + struct cfg80211_tid_stats *tidstats; + struct nlattr *tidattr; + + tidstats = &link_sinfo->pertid[tid]; + + if (!tidstats->filled) + continue; + + tidattr = nla_nest_start_noflag(msg, tid + 1); + if (!tidattr) + goto nla_put_failure; + +#define PUT_TIDVAL_U64(attr, memb) do { \ + if (tidstats->filled & BIT(NL80211_TID_STATS_ ## attr) && \ + nla_put_u64_64bit(msg, NL80211_TID_STATS_ ## attr, \ + tidstats->memb, NL80211_TID_STATS_PAD)) \ + goto nla_put_failure; \ + } while (0) + + PUT_TIDVAL_U64(RX_MSDU, rx_msdu); + PUT_TIDVAL_U64(TX_MSDU, tx_msdu); + PUT_TIDVAL_U64(TX_MSDU_RETRIES, tx_msdu_retries); + PUT_TIDVAL_U64(TX_MSDU_FAILED, tx_msdu_failed); + +#undef PUT_TIDVAL_U64 + if ((tidstats->filled & + BIT(NL80211_TID_STATS_TXQ_STATS)) && + !nl80211_put_txq_stats(msg, &tidstats->txq_stats, + NL80211_TID_STATS_TXQ_STATS)) + goto nla_put_failure; + + nla_nest_end(msg, tidattr); + } + + nla_nest_end(msg, tidsattr); + } + + nla_nest_end(msg, link_sinfoattr); + return 0; + +nla_put_failure: + return -EMSGSIZE; +} + static int nl80211_send_station(struct sk_buff *msg, u32 cmd, u32 portid, u32 seq, int flags, struct cfg80211_registered_device *rdev, @@ -6736,6 +6991,9 @@ static int nl80211_send_station(struct sk_buff *msg, u32 cmd, u32 portid, { void *hdr; struct nlattr *sinfoattr, *bss_param; + struct link_station_info *link_sinfo; + struct nlattr *links, *link; + int link_id; hdr = nl80211hdr_put(msg, portid, seq, flags, cmd); if (!hdr) { @@ -6950,6 +7208,40 @@ static int nl80211_send_station(struct sk_buff *msg, u32 cmd, u32 portid, goto nla_put_failure; } + if (sinfo->valid_links) { + links = nla_nest_start(msg, NL80211_ATTR_MLO_LINKS); + if (!links) + goto nla_put_failure; + + for_each_valid_link(sinfo, link_id) { + link_sinfo = sinfo->links[link_id]; + + if (WARN_ON_ONCE(!link_sinfo)) + continue; + + if (!is_valid_ether_addr(link_sinfo->addr)) + continue; + + link = nla_nest_start(msg, link_id + 1); + if (!link) + goto nla_put_failure; + + if (nla_put_u8(msg, NL80211_ATTR_MLO_LINK_ID, + link_id)) + goto nla_put_failure; + + if (nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, + link_sinfo->addr)) + goto nla_put_failure; + + if (nl80211_fill_link_station(msg, rdev, link_sinfo)) + goto nla_put_failure; + + nla_nest_end(msg, link); + } + nla_nest_end(msg, links); + } + cfg80211_sinfo_release_content(sinfo); genlmsg_end(msg, hdr); return 0; @@ -6960,6 +7252,194 @@ static int nl80211_send_station(struct sk_buff *msg, u32 
cmd, u32 portid, return -EMSGSIZE; } +static void cfg80211_sta_set_mld_sinfo(struct station_info *sinfo) +{ + struct link_station_info *link_sinfo; + int link_id, init = 0; + u32 link_inactive_time; + + sinfo->signal = -99; + + for_each_valid_link(sinfo, link_id) { + link_sinfo = sinfo->links[link_id]; + if (!link_sinfo) + continue; + + if ((link_sinfo->filled & + BIT_ULL(NL80211_STA_INFO_TX_PACKETS))) { + sinfo->tx_packets += link_sinfo->tx_packets; + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_PACKETS); + } + + if ((link_sinfo->filled & + BIT_ULL(NL80211_STA_INFO_RX_PACKETS))) { + sinfo->rx_packets += link_sinfo->rx_packets; + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_PACKETS); + } + + if (link_sinfo->filled & + (BIT_ULL(NL80211_STA_INFO_TX_BYTES) | + BIT_ULL(NL80211_STA_INFO_TX_BYTES64))) { + sinfo->tx_bytes += link_sinfo->tx_bytes; + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BYTES); + } + + if (link_sinfo->filled & + (BIT_ULL(NL80211_STA_INFO_RX_BYTES) | + BIT_ULL(NL80211_STA_INFO_TX_BYTES64))) { + sinfo->rx_bytes += link_sinfo->rx_bytes; + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BYTES); + } + + if (link_sinfo->filled & + BIT_ULL(NL80211_STA_INFO_TX_RETRIES)) { + sinfo->tx_retries += link_sinfo->tx_retries; + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_RETRIES); + } + + if (link_sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_FAILED)) { + sinfo->tx_failed += link_sinfo->tx_failed; + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_FAILED); + } + + if (link_sinfo->filled & + BIT_ULL(NL80211_STA_INFO_RX_DROP_MISC)) { + sinfo->rx_dropped_misc += link_sinfo->rx_dropped_misc; + sinfo->filled |= + BIT_ULL(NL80211_STA_INFO_RX_DROP_MISC); + } + + if (link_sinfo->filled & + BIT_ULL(NL80211_STA_INFO_BEACON_LOSS)) { + sinfo->beacon_loss_count += + link_sinfo->beacon_loss_count; + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_LOSS); + } + + if (link_sinfo->filled & + BIT_ULL(NL80211_STA_INFO_EXPECTED_THROUGHPUT)) { + sinfo->expected_throughput += + link_sinfo->expected_throughput; + sinfo->filled |= + BIT_ULL(NL80211_STA_INFO_EXPECTED_THROUGHPUT); + } + + if (link_sinfo->filled & BIT_ULL(NL80211_STA_INFO_RX_MPDUS)) { + sinfo->rx_mpdu_count += link_sinfo->rx_mpdu_count; + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_MPDUS); + } + + if (link_sinfo->filled & + BIT_ULL(NL80211_STA_INFO_FCS_ERROR_COUNT)) { + sinfo->fcs_err_count += link_sinfo->fcs_err_count; + sinfo->filled |= + BIT_ULL(NL80211_STA_INFO_FCS_ERROR_COUNT); + } + + if (link_sinfo->filled & + BIT_ULL(NL80211_STA_INFO_BEACON_RX)) { + sinfo->rx_beacon += link_sinfo->rx_beacon; + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_RX); + } + + /* Update MLO signal, signal_avg as best among links */ + if ((link_sinfo->filled & BIT_ULL(NL80211_STA_INFO_SIGNAL)) && + link_sinfo->signal > sinfo->signal) { + sinfo->signal = link_sinfo->signal; + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL); + } + + if ((link_sinfo->filled & + BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG)) && + link_sinfo->signal_avg > sinfo->signal_avg) { + sinfo->signal_avg = link_sinfo->signal_avg; + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG); + } + + /* Update MLO inactive_time, bss_param based on least + * value for corresponding field of link. 
+ */ + if ((link_sinfo->filled & + BIT_ULL(NL80211_STA_INFO_INACTIVE_TIME)) && + (!init || + link_inactive_time > link_sinfo->inactive_time)) { + link_inactive_time = link_sinfo->inactive_time; + sinfo->inactive_time = link_sinfo->inactive_time; + sinfo->filled |= NL80211_STA_INFO_INACTIVE_TIME; + } + + if (link_sinfo->filled & BIT_ULL(NL80211_STA_INFO_BSS_PARAM) && + (!init || + sinfo->bss_param.dtim_period > + link_sinfo->bss_param.dtim_period)) { + sinfo->bss_param.dtim_period = + link_sinfo->bss_param.dtim_period; + sinfo->filled |= NL80211_STA_BSS_PARAM_DTIM_PERIOD; + sinfo->bss_param.beacon_interval = + link_sinfo->bss_param.beacon_interval; + sinfo->filled |= NL80211_STA_BSS_PARAM_BEACON_INTERVAL; + } + + /* Update MLO rates as per last updated link rate */ + if ((link_sinfo->filled & + BIT_ULL(NL80211_STA_INFO_TX_BITRATE)) && + (!init || + link_inactive_time > link_sinfo->inactive_time)) { + sinfo->txrate = link_sinfo->txrate; + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE); + } + if ((link_sinfo->filled & + BIT_ULL(NL80211_STA_INFO_RX_BITRATE)) && + (!init || + link_inactive_time > link_sinfo->inactive_time)) { + sinfo->rxrate = link_sinfo->rxrate; + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BITRATE); + } + + if (link_sinfo->filled & + BIT_ULL(NL80211_STA_INFO_TX_DURATION) && + (!init || + link_inactive_time > link_sinfo->inactive_time)) { + sinfo->tx_duration += link_sinfo->tx_duration; + sinfo->filled |= + BIT_ULL(NL80211_STA_INFO_TX_DURATION); + } + if (link_sinfo->filled & + BIT_ULL(NL80211_STA_INFO_RX_DURATION) && + (!init || + link_inactive_time > link_sinfo->inactive_time)) { + sinfo->rx_duration += link_sinfo->rx_duration; + sinfo->filled |= + BIT_ULL(NL80211_STA_INFO_RX_DURATION); + } + init++; + + /* pertid stats accumulate for rx/tx fields */ + if (sinfo->pertid) { + sinfo->pertid->rx_msdu += + link_sinfo->pertid->rx_msdu; + sinfo->pertid->tx_msdu += + link_sinfo->pertid->tx_msdu; + sinfo->pertid->tx_msdu_retries += + link_sinfo->pertid->tx_msdu_retries; + sinfo->pertid->tx_msdu_failed += + link_sinfo->pertid->tx_msdu_failed; + + sinfo->pertid->filled |= + BIT(NL80211_TID_STATS_RX_MSDU) | + BIT(NL80211_TID_STATS_TX_MSDU) | + BIT(NL80211_TID_STATS_TX_MSDU_RETRIES) | + BIT(NL80211_TID_STATS_TX_MSDU_FAILED); + } + } + + /* Reset sinfo->filled bits to exclude fields which don't make + * much sense at the MLO level. 
+ */ + sinfo->filled &= ~BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL); + sinfo->filled &= ~BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL_AVG); +} + static int nl80211_dump_station(struct sk_buff *skb, struct netlink_callback *cb) { @@ -6968,7 +7448,7 @@ static int nl80211_dump_station(struct sk_buff *skb, struct wireless_dev *wdev; u8 mac_addr[ETH_ALEN]; int sta_idx = cb->args[2]; - int err; + int err, i; err = nl80211_prepare_wdev_dump(cb, &rdev, &wdev, NULL); if (err) @@ -6988,6 +7468,16 @@ static int nl80211_dump_station(struct sk_buff *skb, while (1) { memset(&sinfo, 0, sizeof(sinfo)); + + for (i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) { + sinfo.links[i] = + kzalloc(sizeof(*sinfo.links[0]), GFP_KERNEL); + if (!sinfo.links[i]) { + err = -ENOMEM; + goto out_err; + } + } + err = rdev_dump_station(rdev, wdev->netdev, sta_idx, mac_addr, &sinfo); if (err == -ENOENT) @@ -6995,6 +7485,9 @@ static int nl80211_dump_station(struct sk_buff *skb, if (err) goto out_err; + if (sinfo.valid_links) + cfg80211_sta_set_mld_sinfo(&sinfo); + if (nl80211_send_station(skb, NL80211_CMD_NEW_STATION, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, NLM_F_MULTI, @@ -7009,6 +7502,7 @@ static int nl80211_dump_station(struct sk_buff *skb, cb->args[2] = sta_idx; err = skb->len; out_err: + cfg80211_sinfo_release_content(&sinfo); wiphy_unlock(&rdev->wiphy); return err; @@ -7021,7 +7515,7 @@ static int nl80211_get_station(struct sk_buff *skb, struct genl_info *info) struct station_info sinfo; struct sk_buff *msg; u8 *mac_addr = NULL; - int err; + int err, i; memset(&sinfo, 0, sizeof(sinfo)); @@ -7033,9 +7527,19 @@ static int nl80211_get_station(struct sk_buff *skb, struct genl_info *info) if (!rdev->ops->get_station) return -EOPNOTSUPP; + for (i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) { + sinfo.links[i] = kzalloc(sizeof(*sinfo.links[0]), GFP_KERNEL); + if (!sinfo.links[i]) { + cfg80211_sinfo_release_content(&sinfo); + return -ENOMEM; + } + } + err = rdev_get_station(rdev, dev, mac_addr, &sinfo); - if (err) + if (err) { + cfg80211_sinfo_release_content(&sinfo); return err; + } msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) { @@ -7043,6 +7547,9 @@ static int nl80211_get_station(struct sk_buff *skb, struct genl_info *info) return -ENOMEM; } + if (sinfo.valid_links) + cfg80211_sta_set_mld_sinfo(&sinfo); + if (nl80211_send_station(msg, NL80211_CMD_NEW_STATION, info->snd_portid, info->snd_seq, 0, rdev, dev, mac_addr, &sinfo) < 0) { @@ -7349,6 +7856,10 @@ static int nl80211_set_station_tdls(struct genl_info *info, } } + if (info->attrs[NL80211_ATTR_S1G_CAPABILITY]) + params->link_sta_params.s1g_capa = + nla_data(info->attrs[NL80211_ATTR_S1G_CAPABILITY]); + err = nl80211_parse_sta_channel_info(info, params); if (err) return err; @@ -7675,6 +8186,10 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info) params.link_sta_params.he_6ghz_capa = nla_data(info->attrs[NL80211_ATTR_HE_6GHZ_CAPABILITY]); + if (info->attrs[NL80211_ATTR_S1G_CAPABILITY]) + params.link_sta_params.s1g_capa = + nla_data(info->attrs[NL80211_ATTR_S1G_CAPABILITY]); + if (info->attrs[NL80211_ATTR_OPMODE_NOTIF]) { params.link_sta_params.opmode_notif_used = true; params.link_sta_params.opmode_notif = diff --git a/net/wireless/rdev-ops.h b/net/wireless/rdev-ops.h index 9f4783c2354c..803b39c26587 100644 --- a/net/wireless/rdev-ops.h +++ b/net/wireless/rdev-ops.h @@ -577,35 +577,40 @@ static inline int rdev_leave_ibss(struct cfg80211_registered_device *rdev, } static inline int -rdev_set_wiphy_params(struct cfg80211_registered_device *rdev, 
u32 changed) +rdev_set_wiphy_params(struct cfg80211_registered_device *rdev, int radio_idx, + u32 changed) { int ret = -EOPNOTSUPP; - trace_rdev_set_wiphy_params(&rdev->wiphy, changed); + trace_rdev_set_wiphy_params(&rdev->wiphy, radio_idx, changed); if (rdev->ops->set_wiphy_params) - ret = rdev->ops->set_wiphy_params(&rdev->wiphy, changed); + ret = rdev->ops->set_wiphy_params(&rdev->wiphy, radio_idx, + changed); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_set_tx_power(struct cfg80211_registered_device *rdev, - struct wireless_dev *wdev, - enum nl80211_tx_power_setting type, int mbm) + struct wireless_dev *wdev, int radio_idx, + enum nl80211_tx_power_setting type, + int mbm) { int ret; - trace_rdev_set_tx_power(&rdev->wiphy, wdev, type, mbm); - ret = rdev->ops->set_tx_power(&rdev->wiphy, wdev, type, mbm); + trace_rdev_set_tx_power(&rdev->wiphy, wdev, radio_idx, type, mbm); + ret = rdev->ops->set_tx_power(&rdev->wiphy, wdev, radio_idx, type, + mbm); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_get_tx_power(struct cfg80211_registered_device *rdev, - struct wireless_dev *wdev, unsigned int link_id, - int *dbm) + struct wireless_dev *wdev, int radio_idx, + unsigned int link_id, int *dbm) { int ret; - trace_rdev_get_tx_power(&rdev->wiphy, wdev, link_id); - ret = rdev->ops->get_tx_power(&rdev->wiphy, wdev, link_id, dbm); + trace_rdev_get_tx_power(&rdev->wiphy, wdev, radio_idx, link_id); + ret = rdev->ops->get_tx_power(&rdev->wiphy, wdev, radio_idx, link_id, + dbm); trace_rdev_return_int_int(&rdev->wiphy, ret, *dbm); return ret; } @@ -857,21 +862,21 @@ rdev_update_mgmt_frame_registrations(struct cfg80211_registered_device *rdev, } static inline int rdev_set_antenna(struct cfg80211_registered_device *rdev, - u32 tx_ant, u32 rx_ant) + int radio_idx, u32 tx_ant, u32 rx_ant) { int ret; - trace_rdev_set_antenna(&rdev->wiphy, tx_ant, rx_ant); - ret = rdev->ops->set_antenna(&rdev->wiphy, tx_ant, rx_ant); + trace_rdev_set_antenna(&rdev->wiphy, radio_idx, tx_ant, rx_ant); + ret = rdev->ops->set_antenna(&rdev->wiphy, -1, tx_ant, rx_ant); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_get_antenna(struct cfg80211_registered_device *rdev, - u32 *tx_ant, u32 *rx_ant) + int radio_idx, u32 *tx_ant, u32 *rx_ant) { int ret; - trace_rdev_get_antenna(&rdev->wiphy); - ret = rdev->ops->get_antenna(&rdev->wiphy, tx_ant, rx_ant); + trace_rdev_get_antenna(&rdev->wiphy, radio_idx); + ret = rdev->ops->get_antenna(&rdev->wiphy, radio_idx, tx_ant, rx_ant); if (ret) trace_rdev_return_int(&rdev->wiphy, ret); else diff --git a/net/wireless/scan.c b/net/wireless/scan.c index ddd3a97f6609..e8a4fe44ec2d 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -3250,6 +3250,7 @@ cfg80211_inform_bss_frame_data(struct wiphy *wiphy, const u8 *ie; size_t ielen; u64 tsf; + size_t s1g_optional_len; if (WARN_ON(!mgmt)) return NULL; @@ -3264,12 +3265,11 @@ cfg80211_inform_bss_frame_data(struct wiphy *wiphy, if (ieee80211_is_s1g_beacon(mgmt->frame_control)) { ext = (void *) mgmt; - if (ieee80211_is_s1g_short_beacon(mgmt->frame_control)) - min_hdr_len = offsetof(struct ieee80211_ext, - u.s1g_short_beacon.variable); - else - min_hdr_len = offsetof(struct ieee80211_ext, - u.s1g_beacon.variable); + s1g_optional_len = + ieee80211_s1g_optional_len(ext->frame_control); + min_hdr_len = + offsetof(struct ieee80211_ext, u.s1g_beacon.variable) + + s1g_optional_len; } else { /* same for beacons */ min_hdr_len = offsetof(struct ieee80211_mgmt, @@ 
-3285,11 +3285,7 @@ cfg80211_inform_bss_frame_data(struct wiphy *wiphy, const struct ieee80211_s1g_bcn_compat_ie *compat; const struct element *elem; - if (ieee80211_is_s1g_short_beacon(mgmt->frame_control)) - ie = ext->u.s1g_short_beacon.variable; - else - ie = ext->u.s1g_beacon.variable; - + ie = ext->u.s1g_beacon.variable + s1g_optional_len; elem = cfg80211_find_elem(WLAN_EID_S1G_BCN_COMPAT, ie, ielen); if (!elem) return NULL; diff --git a/net/wireless/trace.h b/net/wireless/trace.h index 4ed9fada4ec0..7e43ab9de923 100644 --- a/net/wireless/trace.h +++ b/net/wireless/trace.h @@ -406,9 +406,19 @@ DEFINE_EVENT(wiphy_only_evt, rdev_return_void, TP_ARGS(wiphy) ); -DEFINE_EVENT(wiphy_only_evt, rdev_get_antenna, - TP_PROTO(struct wiphy *wiphy), - TP_ARGS(wiphy) +TRACE_EVENT(rdev_get_antenna, + TP_PROTO(struct wiphy *wiphy, int radio_idx), + TP_ARGS(wiphy, radio_idx), + TP_STRUCT__entry( + WIPHY_ENTRY + __field(int, radio_idx) + ), + TP_fast_assign( + WIPHY_ASSIGN; + __entry->radio_idx = radio_idx; + ), + TP_printk(WIPHY_PR_FMT ", radio_idx: %d", + WIPHY_PR_ARG, __entry->radio_idx) ); DEFINE_EVENT(wiphy_only_evt, rdev_rfkill_poll, @@ -1678,18 +1688,20 @@ TRACE_EVENT(rdev_join_ocb, ); TRACE_EVENT(rdev_set_wiphy_params, - TP_PROTO(struct wiphy *wiphy, u32 changed), - TP_ARGS(wiphy, changed), + TP_PROTO(struct wiphy *wiphy, int radio_idx, u32 changed), + TP_ARGS(wiphy, radio_idx, changed), TP_STRUCT__entry( WIPHY_ENTRY + __field(int, radio_idx) __field(u32, changed) ), TP_fast_assign( WIPHY_ASSIGN; + __entry->radio_idx = radio_idx; __entry->changed = changed; ), - TP_printk(WIPHY_PR_FMT ", changed: %u", - WIPHY_PR_ARG, __entry->changed) + TP_printk(WIPHY_PR_FMT ", radio_idx: %d, changed: %u", + WIPHY_PR_ARG, __entry->radio_idx, __entry->changed) ); DECLARE_EVENT_CLASS(wiphy_wdev_link_evt, @@ -1710,30 +1722,51 @@ DECLARE_EVENT_CLASS(wiphy_wdev_link_evt, WIPHY_PR_ARG, WDEV_PR_ARG, __entry->link_id) ); -DEFINE_EVENT(wiphy_wdev_link_evt, rdev_get_tx_power, +TRACE_EVENT(rdev_get_tx_power, TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev, - unsigned int link_id), - TP_ARGS(wiphy, wdev, link_id) + int radio_idx, unsigned int link_id), + TP_ARGS(wiphy, wdev, radio_idx, link_id), + TP_STRUCT__entry( + WIPHY_ENTRY + WDEV_ENTRY + __field(int, radio_idx) + __field(unsigned int, link_id) + ), + TP_fast_assign( + WIPHY_ASSIGN; + WDEV_ASSIGN; + __entry->radio_idx = radio_idx; + __entry->link_id = link_id; + ), + TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT + ", radio_idx: %d, link_id: %u", + WIPHY_PR_ARG, WDEV_PR_ARG, + __entry->radio_idx, __entry->link_id) ); TRACE_EVENT(rdev_set_tx_power, TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev, - enum nl80211_tx_power_setting type, int mbm), - TP_ARGS(wiphy, wdev, type, mbm), + int radio_idx, enum nl80211_tx_power_setting type, + int mbm), + TP_ARGS(wiphy, wdev, radio_idx, type, mbm), TP_STRUCT__entry( WIPHY_ENTRY WDEV_ENTRY + __field(int, radio_idx) __field(enum nl80211_tx_power_setting, type) __field(int, mbm) ), TP_fast_assign( WIPHY_ASSIGN; WDEV_ASSIGN; + __entry->radio_idx = radio_idx; __entry->type = type; __entry->mbm = mbm; ), - TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT ", type: %u, mbm: %d", - WIPHY_PR_ARG, WDEV_PR_ARG,__entry->type, __entry->mbm) + TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT + ", radio_idx: %d, type: %u, mbm: %d", + WIPHY_PR_ARG, WDEV_PR_ARG, + __entry->radio_idx, __entry->type, __entry->mbm) ); TRACE_EVENT(rdev_return_int_int, @@ -1866,26 +1899,24 @@ TRACE_EVENT(rdev_return_void_tx_rx, __entry->rx_max) ); 
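
The rdev-ops and tracepoint hunks above thread a new radio_idx argument through cfg80211's driver wrappers, with a negative index used when an operation targets the whole wiphy rather than one radio of a multi-radio device. As a rough sketch only (the helper name and body below are illustrative assumptions, not code from this series; wiphy->radio_cfg and its rts_threshold field are the ones introduced in the hunks above), a consumer of that argument could look like this:

#include <net/cfg80211.h>

/* Hypothetical driver-side helper, not part of the patch series. */
static int example_apply_rts(struct wiphy *wiphy, int radio_idx, u32 rts)
{
	int i;

	if (radio_idx >= 0) {
		/* A specific radio of a multi-radio wiphy was addressed. */
		if (radio_idx >= wiphy->n_radio)
			return -EINVAL;
		wiphy->radio_cfg[radio_idx].rts_threshold = rts;
		return 0;
	}

	/* Negative index: mirror the wiphy-wide value to every radio,
	 * matching what nl80211_set_wiphy() does in the hunks above.
	 */
	for (i = 0; i < wiphy->n_radio; i++)
		wiphy->radio_cfg[i].rts_threshold = rts;

	return 0;
}
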
-DECLARE_EVENT_CLASS(tx_rx_evt, - TP_PROTO(struct wiphy *wiphy, u32 tx, u32 rx), - TP_ARGS(wiphy, tx, rx), +TRACE_EVENT(rdev_set_antenna, + TP_PROTO(struct wiphy *wiphy, int radio_idx, u32 tx, u32 rx), + TP_ARGS(wiphy, radio_idx, tx, rx), TP_STRUCT__entry( WIPHY_ENTRY + __field(int, radio_idx) __field(u32, tx) __field(u32, rx) ), TP_fast_assign( WIPHY_ASSIGN; + __entry->radio_idx = radio_idx; __entry->tx = tx; __entry->rx = rx; ), - TP_printk(WIPHY_PR_FMT ", tx: %u, rx: %u ", - WIPHY_PR_ARG, __entry->tx, __entry->rx) -); - -DEFINE_EVENT(tx_rx_evt, rdev_set_antenna, - TP_PROTO(struct wiphy *wiphy, u32 tx, u32 rx), - TP_ARGS(wiphy, tx, rx) + TP_printk(WIPHY_PR_FMT ", radio_idx: %d, tx: %u, rx: %u ", + WIPHY_PR_ARG, __entry->radio_idx, + __entry->tx, __entry->rx) ); DECLARE_EVENT_CLASS(wiphy_netdev_id_evt, @@ -4126,20 +4157,22 @@ TRACE_EVENT(cfg80211_links_removed, TRACE_EVENT(cfg80211_mlo_reconf_add_done, TP_PROTO(struct net_device *netdev, u16 link_mask, - const u8 *buf, size_t len), - TP_ARGS(netdev, link_mask, buf, len), + const u8 *buf, size_t len, bool driver_initiated), + TP_ARGS(netdev, link_mask, buf, len, driver_initiated), TP_STRUCT__entry( NETDEV_ENTRY __field(u16, link_mask) __dynamic_array(u8, buf, len) + __field(bool, driver_initiated) ), TP_fast_assign( NETDEV_ASSIGN; __entry->link_mask = link_mask; memcpy(__get_dynamic_array(buf), buf, len); + __entry->driver_initiated = driver_initiated; ), - TP_printk(NETDEV_PR_FMT ", link_mask:0x%x", - NETDEV_PR_ARG, __entry->link_mask) + TP_printk(NETDEV_PR_FMT ", link_mask:0x%x, driver_initiated:%d", + NETDEV_PR_ARG, __entry->link_mask, __entry->driver_initiated) ); TRACE_EVENT(rdev_assoc_ml_reconf, diff --git a/net/wireless/util.c b/net/wireless/util.c index ed868c0f7ca8..5aff11c35303 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -2516,6 +2516,30 @@ int cfg80211_check_combinations(struct wiphy *wiphy, } EXPORT_SYMBOL(cfg80211_check_combinations); +int cfg80211_get_radio_idx_by_chan(struct wiphy *wiphy, + const struct ieee80211_channel *chan) +{ + const struct wiphy_radio *radio; + int i, j; + u32 freq; + + if (!chan) + return -EINVAL; + + freq = ieee80211_channel_to_khz(chan); + for (i = 0; i < wiphy->n_radio; i++) { + radio = &wiphy->radio[i]; + for (j = 0; j < radio->n_freq_range; j++) { + if (freq >= radio->freq_range[j].start_freq && + freq < radio->freq_range[j].end_freq) + return i; + } + } + + return -ENOENT; +} +EXPORT_SYMBOL(cfg80211_get_radio_idx_by_chan); + int ieee80211_get_ratemask(struct ieee80211_supported_band *sband, const u8 *rates, unsigned int n_rates, u32 *mask) @@ -2626,6 +2650,18 @@ bool cfg80211_does_bw_fit_range(const struct ieee80211_freq_range *freq_range, return false; } +int cfg80211_link_sinfo_alloc_tid_stats(struct link_station_info *link_sinfo, + gfp_t gfp) +{ + link_sinfo->pertid = kcalloc(IEEE80211_NUM_TIDS + 1, + sizeof(*link_sinfo->pertid), gfp); + if (!link_sinfo->pertid) + return -ENOMEM; + + return 0; +} +EXPORT_SYMBOL(cfg80211_link_sinfo_alloc_tid_stats); + int cfg80211_sinfo_alloc_tid_stats(struct station_info *sinfo, gfp_t gfp) { sinfo->pertid = kcalloc(IEEE80211_NUM_TIDS + 1, diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c index a74b1afc594e..1241fda78a68 100644 --- a/net/wireless/wext-compat.c +++ b/net/wireless/wext-compat.c @@ -263,7 +263,7 @@ int cfg80211_wext_siwrts(struct net_device *dev, else wdev->wiphy->rts_threshold = rts->value; - err = rdev_set_wiphy_params(rdev, WIPHY_PARAM_RTS_THRESHOLD); + err = rdev_set_wiphy_params(rdev, -1, 
WIPHY_PARAM_RTS_THRESHOLD); if (err) wdev->wiphy->rts_threshold = orts; return err; @@ -304,7 +304,7 @@ int cfg80211_wext_siwfrag(struct net_device *dev, wdev->wiphy->frag_threshold = frag->value & ~0x1; } - err = rdev_set_wiphy_params(rdev, WIPHY_PARAM_FRAG_THRESHOLD); + err = rdev_set_wiphy_params(rdev, -1, WIPHY_PARAM_FRAG_THRESHOLD); if (err) wdev->wiphy->frag_threshold = ofrag; return err; @@ -355,7 +355,7 @@ static int cfg80211_wext_siwretry(struct net_device *dev, changed |= WIPHY_PARAM_RETRY_SHORT; } - err = rdev_set_wiphy_params(rdev, changed); + err = rdev_set_wiphy_params(rdev, -1, changed); if (err) { wdev->wiphy->retry_short = oshort; wdev->wiphy->retry_long = olong; @@ -890,7 +890,7 @@ static int cfg80211_wext_siwtxpower(struct net_device *dev, guard(wiphy)(&rdev->wiphy); - return rdev_set_tx_power(rdev, wdev, type, DBM_TO_MBM(dbm)); + return rdev_set_tx_power(rdev, wdev, -1, type, DBM_TO_MBM(dbm)); } static int cfg80211_wext_giwtxpower(struct net_device *dev, @@ -910,7 +910,7 @@ static int cfg80211_wext_giwtxpower(struct net_device *dev, return -EOPNOTSUPP; scoped_guard(wiphy, &rdev->wiphy) { - err = rdev_get_tx_power(rdev, wdev, 0, &val); + err = rdev_get_tx_power(rdev, wdev, -1, 0, &val); } if (err) return err; diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c index 8dda4178497c..1f8ae9f4a3f1 100644 --- a/net/x25/af_x25.c +++ b/net/x25/af_x25.c @@ -359,7 +359,7 @@ static void __x25_destroy_socket(struct sock *); */ static void x25_destroy_timer(struct timer_list *t) { - struct sock *sk = from_timer(sk, t, sk_timer); + struct sock *sk = timer_container_of(sk, t, sk_timer); x25_destroy_socket_from_timer(sk); } diff --git a/net/x25/x25_link.c b/net/x25/x25_link.c index 37b190499405..4608aa5b4f31 100644 --- a/net/x25/x25_link.c +++ b/net/x25/x25_link.c @@ -46,7 +46,7 @@ static inline void x25_start_t20timer(struct x25_neigh *nb) static void x25_t20timer_expiry(struct timer_list *t) { - struct x25_neigh *nb = from_timer(nb, t, t20timer); + struct x25_neigh *nb = timer_container_of(nb, t, t20timer); x25_transmit_restart_request(nb); diff --git a/net/x25/x25_timer.c b/net/x25/x25_timer.c index e4c5ad5b070f..2ec63a1f4c6d 100644 --- a/net/x25/x25_timer.c +++ b/net/x25/x25_timer.c @@ -89,7 +89,7 @@ unsigned long x25_display_timer(struct sock *sk) static void x25_heartbeat_expiry(struct timer_list *t) { - struct sock *sk = from_timer(sk, t, sk_timer); + struct sock *sk = timer_container_of(sk, t, sk_timer); bh_lock_sock(sk); if (sock_owned_by_user(sk)) /* can currently only occur in state 3 */ @@ -156,7 +156,7 @@ static inline void x25_do_timer_expiry(struct sock * sk) static void x25_timer_expiry(struct timer_list *t) { - struct x25_sock *x25 = from_timer(x25, t, timer); + struct x25_sock *x25 = timer_container_of(x25, t, timer); struct sock *sk = &x25->sk; bh_lock_sock(sk); diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index d4134a18c658..094d2454602e 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -353,7 +353,7 @@ static inline unsigned long make_jiffies(long secs) static void xfrm_policy_timer(struct timer_list *t) { - struct xfrm_policy *xp = from_timer(xp, t, timer); + struct xfrm_policy *xp = timer_container_of(xp, t, timer); time64_t now = ktime_get_real_seconds(); time64_t next = TIME64_MAX; int warn = 0; @@ -2898,7 +2898,7 @@ static void xfrm_policy_queue_process(struct timer_list *t) struct sk_buff *skb; struct sock *sk; struct dst_entry *dst; - struct xfrm_policy *pol = from_timer(pol, t, polq.hold_timer); + struct xfrm_policy *pol = 
timer_container_of(pol, t, polq.hold_timer); struct net *net = xp_net(pol); struct xfrm_policy_queue *pq = &pol->polq; struct flowi fl; diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index 203b585c2ae2..77cc418ad69e 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -2697,7 +2697,7 @@ EXPORT_SYMBOL(xfrm_state_walk_done); static void xfrm_replay_timer_handler(struct timer_list *t) { - struct xfrm_state *x = from_timer(x, t, rtimer); + struct xfrm_state *x = timer_container_of(x, t, rtimer); spin_lock(&x->lock);
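
The bulk of the mechanical churn in this series is the rename of from_timer() to timer_container_of() in timer callbacks across net/; the semantics, a container_of() on the embedded struct timer_list, are unchanged. A minimal sketch of the pattern under that assumption, using hypothetical names rather than any of the structures touched above:

#include <linux/timer.h>
#include <linux/jiffies.h>

struct demo_session {			/* hypothetical container object */
	struct timer_list retry_timer;	/* timer embedded in the object */
	unsigned int retries;
};

static void demo_retry_timer(struct timer_list *t)
{
	/* Recover the enclosing demo_session from the timer_list
	 * pointer; this is exactly what from_timer() used to spell.
	 */
	struct demo_session *s = timer_container_of(s, t, retry_timer);

	s->retries++;
	mod_timer(&s->retry_timer, jiffies + HZ);
}

static void demo_session_init(struct demo_session *s)
{
	/* Setup and arming are untouched by the rename. */
	timer_setup(&s->retry_timer, demo_retry_timer, 0);
	mod_timer(&s->retry_timer, jiffies + HZ);
}
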