author | Arnaldo Carvalho de Melo <acme@redhat.com> | 2023-05-31 15:31:56 -0300
committer | Arnaldo Carvalho de Melo <acme@redhat.com> | 2023-05-31 15:31:56 -0300
commit | d17ed982e4497070e4575ecd8c5afcfcb7210b8b (patch)
tree | 1524dbccee3c535ab3a8d027a170fe86a7c2935c /net
parent | 422db30713ac84080a8c4b3efa9dd560b654ed57 (diff)
parent | c041d33bf7ec731bb71f47e4d45a7aec9e40b1b9 (diff)
Merge tag 'perf-tools-fixes-for-v6.4-2-2023-05-30' into perf-tools-next
perf tools fixes for v6.4: 2nd batch
- Fix the BPF CO-RE naming convention used to check the availability of fields
of 'union perf_mem_data_src' on the running kernel (see the CO-RE sketch after
this list).
- Remove the use of llvm-strip on BPF skel object files; it isn't needed, and
requiring it broke the build when the llvm package, which contains it in most
distros, isn't installed.
- Fix tools that use both evsel->{bpf_counter_list,bpf_filters} by moving those
members out of a union (see the struct layout sketch after this list).
- Remove the extra "--" from the 'perf ftrace latency' --use-nsec option;
previously it worked only via the '-n' alternative (see the option-table
sketch after this list).
- Don't stop the build when both binutils-devel and a C++ compiler are
unavailable to compile the alternative C++ demangle support code; disable
that feature instead.
- Sync the linux/in.h and coresight-pmu.h header copies with the kernel sources.
- Fix relative include path to cs-etm.h.
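A hedged sketch of the CO-RE convention the first bullet relies on (the
"___new" suffix name, the 'mem_hops' field choice and the bitfield layout are
illustrative assumptions, not the exact perf sources): libbpf matches a
locally re-declared type against the kernel's real 'union perf_mem_data_src'
while ignoring everything from the "___" onwards, and bpf_core_field_exists()
reports whether the probed field exists on the running kernel.

    /* Sketch only: written as BPF skel code, where vmlinux.h provides the
     * base types; the local "___new" variant never clashes with the real
     * union because the "___" suffix is ignored during CO-RE relocation.
     */
    #include "vmlinux.h"
    #include <bpf/bpf_core_read.h>

    union perf_mem_data_src___new {
        __u64 val;
        struct {
            __u64 mem_op:5,    /* type of opcode */
                  mem_hops:3;  /* newer field being probed for */
        };
    };

    static inline int data_src_has_mem_hops(void)
    {
        /* Nonzero only if the running kernel's union has 'mem_hops' */
        return bpf_core_field_exists(((union perf_mem_data_src___new *)0)->mem_hops);
    }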
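The evsel union fix is easiest to see in a minimal, self-contained sketch
(plain C; the struct names are made up and this is not perf's actual evsel
definition): two list heads overlaid in a union share the same bytes, so a
tool that initializes and walks both corrupts whichever was set up first; the
cure is simply separate members.

    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    struct evsel_buggy {
        union {                             /* both lists share storage */
            struct list_head bpf_counter_list;
            struct list_head bpf_filters;
        };
    };

    struct evsel_fixed {
        struct list_head bpf_counter_list;  /* independent storage */
        struct list_head bpf_filters;
    };

    int main(void)
    {
        /* the fixed layout is bigger because nothing overlaps anymore */
        printf("buggy: %zu bytes, fixed: %zu bytes\n",
               sizeof(struct evsel_buggy), sizeof(struct evsel_fixed));
        return 0;
    }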
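The --use-nsec fix boils down to a slip in an option table; here is a sketch
in the style of tools/lib/subcmd's parse-options (the variable name and help
string are assumptions): the long-name string must not itself carry the "--"
prefix, because the parser prepends it, so a doubled prefix leaves only the
short '-n' form working.

    #include <stdbool.h>
    #include <subcmd/parse-options.h>

    static bool use_nsec;

    static const struct option ftrace_latency_options[] = {
        /* broken: the parser would look for "----use-nsec" */
        /* OPT_BOOLEAN('n', "--use-nsec", &use_nsec, "Use nano-second histogram"), */
        /* fixed: plain long name, parse-options supplies the "--" itself */
        OPT_BOOLEAN('n', "use-nsec", &use_nsec, "Use nano-second histogram"),
        OPT_END()
    };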
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Diffstat (limited to 'net')
85 files changed, 771 insertions, 469 deletions
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index 870e4935d6e6..b90781b9ece6 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c @@ -109,8 +109,8 @@ static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb, * NOTE: THIS ASSUMES DIX ETHERNET, SPECIFICALLY NOT SUPPORTING * OTHER THINGS LIKE FDDI/TokenRing/802.3 SNAPs... */ - if (veth->h_vlan_proto != vlan->vlan_proto || - vlan->flags & VLAN_FLAG_REORDER_HDR) { + if (vlan->flags & VLAN_FLAG_REORDER_HDR || + veth->h_vlan_proto != vlan->vlan_proto) { u16 vlan_tci; vlan_tci = vlan->vlan_id; vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb->priority); diff --git a/net/atm/resources.c b/net/atm/resources.c index 2b2d33eeaf20..995d29e7fb13 100644 --- a/net/atm/resources.c +++ b/net/atm/resources.c @@ -400,6 +400,7 @@ done: return error; } +#ifdef CONFIG_PROC_FS void *atm_dev_seq_start(struct seq_file *seq, loff_t *pos) { mutex_lock(&atm_dev_mutex); @@ -415,3 +416,4 @@ void *atm_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos) { return seq_list_next(v, &atm_devs, pos); } +#endif diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index 640b951bf40a..f75ef12f18f7 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c @@ -1083,8 +1083,28 @@ static void hci_conn_unlink(struct hci_conn *conn) if (!conn->parent) { struct hci_link *link, *t; - list_for_each_entry_safe(link, t, &conn->link_list, list) - hci_conn_unlink(link->conn); + list_for_each_entry_safe(link, t, &conn->link_list, list) { + struct hci_conn *child = link->conn; + + hci_conn_unlink(child); + + /* If hdev is down it means + * hci_dev_close_sync/hci_conn_hash_flush is in progress + * and links don't need to be cleanup as all connections + * would be cleanup. + */ + if (!test_bit(HCI_UP, &hdev->flags)) + continue; + + /* Due to race, SCO connection might be not established + * yet at this point. Delete it now, otherwise it is + * possible for it to be stuck and can't be deleted. + */ + if ((child->type == SCO_LINK || + child->type == ESCO_LINK) && + child->handle == HCI_CONN_HANDLE_UNSET) + hci_conn_del(child); + } return; } @@ -1092,35 +1112,30 @@ static void hci_conn_unlink(struct hci_conn *conn) if (!conn->link) return; - hci_conn_put(conn->parent); - conn->parent = NULL; - list_del_rcu(&conn->link->list); synchronize_rcu(); + hci_conn_drop(conn->parent); + hci_conn_put(conn->parent); + conn->parent = NULL; + kfree(conn->link); conn->link = NULL; - - /* Due to race, SCO connection might be not established - * yet at this point. Delete it now, otherwise it is - * possible for it to be stuck and can't be deleted. 
- */ - if (conn->handle == HCI_CONN_HANDLE_UNSET) - hci_conn_del(conn); } -int hci_conn_del(struct hci_conn *conn) +void hci_conn_del(struct hci_conn *conn) { struct hci_dev *hdev = conn->hdev; BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle); + hci_conn_unlink(conn); + cancel_delayed_work_sync(&conn->disc_work); cancel_delayed_work_sync(&conn->auto_accept_work); cancel_delayed_work_sync(&conn->idle_work); if (conn->type == ACL_LINK) { - hci_conn_unlink(conn); /* Unacked frames */ hdev->acl_cnt += conn->sent; } else if (conn->type == LE_LINK) { @@ -1131,13 +1146,6 @@ int hci_conn_del(struct hci_conn *conn) else hdev->acl_cnt += conn->sent; } else { - struct hci_conn *acl = conn->parent; - - if (acl) { - hci_conn_unlink(conn); - hci_conn_drop(acl); - } - /* Unacked ISO frames */ if (conn->type == ISO_LINK) { if (hdev->iso_pkts) @@ -1160,8 +1168,6 @@ int hci_conn_del(struct hci_conn *conn) * rest of hci_conn_del. */ hci_conn_cleanup(conn); - - return 0; } struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, uint8_t src_type) @@ -2462,22 +2468,21 @@ timer: /* Drop all connection on the device */ void hci_conn_hash_flush(struct hci_dev *hdev) { - struct hci_conn_hash *h = &hdev->conn_hash; - struct hci_conn *c, *n; + struct list_head *head = &hdev->conn_hash.list; + struct hci_conn *conn; BT_DBG("hdev %s", hdev->name); - list_for_each_entry_safe(c, n, &h->list, list) { - c->state = BT_CLOSED; - - hci_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM); - - /* Unlink before deleting otherwise it is possible that - * hci_conn_del removes the link which may cause the list to - * contain items already freed. - */ - hci_conn_unlink(c); - hci_conn_del(c); + /* We should not traverse the list here, because hci_conn_del + * can remove extra links, which may cause the list traversal + * to hit items that have already been released. 
+ */ + while ((conn = list_first_entry_or_null(head, + struct hci_conn, + list)) != NULL) { + conn->state = BT_CLOSED; + hci_disconn_cfm(conn, HCI_ERROR_LOCAL_HOST_TERM); + hci_conn_del(conn); } } diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c index 57744704ff69..84d6dd5e5b1a 100644 --- a/net/bridge/br_forward.c +++ b/net/bridge/br_forward.c @@ -42,7 +42,7 @@ int br_dev_queue_push_xmit(struct net *net, struct sock *sk, struct sk_buff *skb eth_type_vlan(skb->protocol)) { int depth; - if (!__vlan_get_protocol(skb, skb->protocol, &depth)) + if (!vlan_get_protocol_and_depth(skb, skb->protocol, &depth)) goto drop; skb_set_network_header(skb, depth); diff --git a/net/bridge/br_private_tunnel.h b/net/bridge/br_private_tunnel.h index 2b053289f016..efb096025151 100644 --- a/net/bridge/br_private_tunnel.h +++ b/net/bridge/br_private_tunnel.h @@ -27,6 +27,10 @@ int br_process_vlan_tunnel_info(const struct net_bridge *br, int br_get_vlan_tunnel_info_size(struct net_bridge_vlan_group *vg); int br_fill_vlan_tunnel_info(struct sk_buff *skb, struct net_bridge_vlan_group *vg); +bool vlan_tunid_inrange(const struct net_bridge_vlan *v_curr, + const struct net_bridge_vlan *v_last); +int br_vlan_tunnel_info(const struct net_bridge_port *p, int cmd, + u16 vid, u32 tun_id, bool *changed); #ifdef CONFIG_BRIDGE_VLAN_FILTERING /* br_vlan_tunnel.c */ @@ -43,10 +47,6 @@ void br_handle_ingress_vlan_tunnel(struct sk_buff *skb, struct net_bridge_vlan_group *vg); int br_handle_egress_vlan_tunnel(struct sk_buff *skb, struct net_bridge_vlan *vlan); -bool vlan_tunid_inrange(const struct net_bridge_vlan *v_curr, - const struct net_bridge_vlan *v_last); -int br_vlan_tunnel_info(const struct net_bridge_port *p, int cmd, - u16 vid, u32 tun_id, bool *changed); #else static inline int vlan_tunnel_init(struct net_bridge_vlan_group *vg) { diff --git a/net/can/isotp.c b/net/can/isotp.c index a750259cb79c..84f9aba02901 100644 --- a/net/can/isotp.c +++ b/net/can/isotp.c @@ -1139,7 +1139,7 @@ static int isotp_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, struct isotp_sock *so = isotp_sk(sk); int ret = 0; - if (flags & ~(MSG_DONTWAIT | MSG_TRUNC | MSG_PEEK)) + if (flags & ~(MSG_DONTWAIT | MSG_TRUNC | MSG_PEEK | MSG_CMSG_COMPAT)) return -EINVAL; if (!so->bound) diff --git a/net/can/j1939/socket.c b/net/can/j1939/socket.c index 7e90f9e61d9b..1790469b2580 100644 --- a/net/can/j1939/socket.c +++ b/net/can/j1939/socket.c @@ -798,7 +798,7 @@ static int j1939_sk_recvmsg(struct socket *sock, struct msghdr *msg, struct j1939_sk_buff_cb *skcb; int ret = 0; - if (flags & ~(MSG_DONTWAIT | MSG_ERRQUEUE)) + if (flags & ~(MSG_DONTWAIT | MSG_ERRQUEUE | MSG_CMSG_COMPAT)) return -EINVAL; if (flags & MSG_ERRQUEUE) diff --git a/net/core/datagram.c b/net/core/datagram.c index 5662dff3d381..176eb5834746 100644 --- a/net/core/datagram.c +++ b/net/core/datagram.c @@ -807,18 +807,21 @@ __poll_t datagram_poll(struct file *file, struct socket *sock, { struct sock *sk = sock->sk; __poll_t mask; + u8 shutdown; sock_poll_wait(file, sock, wait); mask = 0; /* exceptional events? */ - if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue)) + if (READ_ONCE(sk->sk_err) || + !skb_queue_empty_lockless(&sk->sk_error_queue)) mask |= EPOLLERR | (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? 
EPOLLPRI : 0); - if (sk->sk_shutdown & RCV_SHUTDOWN) + shutdown = READ_ONCE(sk->sk_shutdown); + if (shutdown & RCV_SHUTDOWN) mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM; - if (sk->sk_shutdown == SHUTDOWN_MASK) + if (shutdown == SHUTDOWN_MASK) mask |= EPOLLHUP; /* readable? */ @@ -827,10 +830,12 @@ __poll_t datagram_poll(struct file *file, struct socket *sock, /* Connection-based need to check for termination and startup */ if (connection_based(sk)) { - if (sk->sk_state == TCP_CLOSE) + int state = READ_ONCE(sk->sk_state); + + if (state == TCP_CLOSE) mask |= EPOLLHUP; /* connection hasn't started yet? */ - if (sk->sk_state == TCP_SYN_SENT) + if (state == TCP_SYN_SENT) return mask; } diff --git a/net/core/dev.c b/net/core/dev.c index 735096d42c1d..b3c13e041935 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -3335,7 +3335,7 @@ __be16 skb_network_protocol(struct sk_buff *skb, int *depth) type = eth->h_proto; } - return __vlan_get_protocol(skb, type, depth); + return vlan_get_protocol_and_depth(skb, type, depth); } /* openvswitch calls this on rx path, so we need a different check. diff --git a/net/core/page_pool.c b/net/core/page_pool.c index e212e9d7edcb..a3e12a61d456 100644 --- a/net/core/page_pool.c +++ b/net/core/page_pool.c @@ -134,6 +134,29 @@ EXPORT_SYMBOL(page_pool_ethtool_stats_get); #define recycle_stat_add(pool, __stat, val) #endif +static bool page_pool_producer_lock(struct page_pool *pool) + __acquires(&pool->ring.producer_lock) +{ + bool in_softirq = in_softirq(); + + if (in_softirq) + spin_lock(&pool->ring.producer_lock); + else + spin_lock_bh(&pool->ring.producer_lock); + + return in_softirq; +} + +static void page_pool_producer_unlock(struct page_pool *pool, + bool in_softirq) + __releases(&pool->ring.producer_lock) +{ + if (in_softirq) + spin_unlock(&pool->ring.producer_lock); + else + spin_unlock_bh(&pool->ring.producer_lock); +} + static int page_pool_init(struct page_pool *pool, const struct page_pool_params *params) { @@ -617,6 +640,7 @@ void page_pool_put_page_bulk(struct page_pool *pool, void **data, int count) { int i, bulk_len = 0; + bool in_softirq; for (i = 0; i < count; i++) { struct page *page = virt_to_head_page(data[i]); @@ -635,7 +659,7 @@ void page_pool_put_page_bulk(struct page_pool *pool, void **data, return; /* Bulk producer into ptr_ring page_pool cache */ - page_pool_ring_lock(pool); + in_softirq = page_pool_producer_lock(pool); for (i = 0; i < bulk_len; i++) { if (__ptr_ring_produce(&pool->ring, data[i])) { /* ring full */ @@ -644,7 +668,7 @@ void page_pool_put_page_bulk(struct page_pool *pool, void **data, } } recycle_stat_add(pool, ring, i); - page_pool_ring_unlock(pool); + page_pool_producer_unlock(pool, in_softirq); /* Hopefully all pages was return into ptr_ring */ if (likely(i == bulk_len)) diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 26a586007d8b..cea28d30abb5 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -5224,8 +5224,10 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb, } else { skb = skb_clone(orig_skb, GFP_ATOMIC); - if (skb_orphan_frags_rx(skb, GFP_ATOMIC)) + if (skb_orphan_frags_rx(skb, GFP_ATOMIC)) { + kfree_skb(skb); return; + } } if (!skb) return; @@ -5298,7 +5300,7 @@ bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off) u32 csum_end = (u32)start + (u32)off + sizeof(__sum16); u32 csum_start = skb_headroom(skb) + (u32)start; - if (unlikely(csum_start > U16_MAX || csum_end > skb_headlen(skb))) { + if (unlikely(csum_start >= U16_MAX || csum_end > skb_headlen(skb))) { net_warn_ratelimited("bad partial 
csum: csum=%u/%u headroom=%u headlen=%u\n", start, off, skb_headroom(skb), skb_headlen(skb)); return false; @@ -5306,7 +5308,7 @@ bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off) skb->ip_summed = CHECKSUM_PARTIAL; skb->csum_start = csum_start; skb->csum_offset = off; - skb_set_transport_header(skb, start); + skb->transport_header = csum_start; return true; } EXPORT_SYMBOL_GPL(skb_partial_csum_set); diff --git a/net/core/skmsg.c b/net/core/skmsg.c index f81883759d38..a9060e1f0e43 100644 --- a/net/core/skmsg.c +++ b/net/core/skmsg.c @@ -481,8 +481,6 @@ int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg, msg_rx = sk_psock_peek_msg(psock); } out: - if (psock->work_state.skb && copied > 0) - schedule_work(&psock->work); return copied; } EXPORT_SYMBOL_GPL(sk_msg_recvmsg); @@ -624,42 +622,33 @@ static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb, static void sk_psock_skb_state(struct sk_psock *psock, struct sk_psock_work_state *state, - struct sk_buff *skb, int len, int off) { spin_lock_bh(&psock->ingress_lock); if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) { - state->skb = skb; state->len = len; state->off = off; - } else { - sock_drop(psock->sk, skb); } spin_unlock_bh(&psock->ingress_lock); } static void sk_psock_backlog(struct work_struct *work) { - struct sk_psock *psock = container_of(work, struct sk_psock, work); + struct delayed_work *dwork = to_delayed_work(work); + struct sk_psock *psock = container_of(dwork, struct sk_psock, work); struct sk_psock_work_state *state = &psock->work_state; struct sk_buff *skb = NULL; + u32 len = 0, off = 0; bool ingress; - u32 len, off; int ret; mutex_lock(&psock->work_mutex); - if (unlikely(state->skb)) { - spin_lock_bh(&psock->ingress_lock); - skb = state->skb; + if (unlikely(state->len)) { len = state->len; off = state->off; - state->skb = NULL; - spin_unlock_bh(&psock->ingress_lock); } - if (skb) - goto start; - while ((skb = skb_dequeue(&psock->ingress_skb))) { + while ((skb = skb_peek(&psock->ingress_skb))) { len = skb->len; off = 0; if (skb_bpf_strparser(skb)) { @@ -668,7 +657,6 @@ static void sk_psock_backlog(struct work_struct *work) off = stm->offset; len = stm->full_len; } -start: ingress = skb_bpf_ingress(skb); skb_bpf_redirect_clear(skb); do { @@ -678,22 +666,28 @@ start: len, ingress); if (ret <= 0) { if (ret == -EAGAIN) { - sk_psock_skb_state(psock, state, skb, - len, off); + sk_psock_skb_state(psock, state, len, off); + + /* Delay slightly to prioritize any + * other work that might be here. + */ + if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) + schedule_delayed_work(&psock->work, 1); goto end; } /* Hard errors break pipe and stop xmit. */ sk_psock_report_error(psock, ret ? 
-ret : EPIPE); sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED); - sock_drop(psock->sk, skb); goto end; } off += ret; len -= ret; } while (len); - if (!ingress) + skb = skb_dequeue(&psock->ingress_skb); + if (!ingress) { kfree_skb(skb); + } } end: mutex_unlock(&psock->work_mutex); @@ -734,7 +728,7 @@ struct sk_psock *sk_psock_init(struct sock *sk, int node) INIT_LIST_HEAD(&psock->link); spin_lock_init(&psock->link_lock); - INIT_WORK(&psock->work, sk_psock_backlog); + INIT_DELAYED_WORK(&psock->work, sk_psock_backlog); mutex_init(&psock->work_mutex); INIT_LIST_HEAD(&psock->ingress_msg); spin_lock_init(&psock->ingress_lock); @@ -786,11 +780,6 @@ static void __sk_psock_zap_ingress(struct sk_psock *psock) skb_bpf_redirect_clear(skb); sock_drop(psock->sk, skb); } - kfree_skb(psock->work_state.skb); - /* We null the skb here to ensure that calls to sk_psock_backlog - * do not pick up the free'd skb. - */ - psock->work_state.skb = NULL; __sk_psock_purge_ingress_msg(psock); } @@ -809,7 +798,6 @@ void sk_psock_stop(struct sk_psock *psock) spin_lock_bh(&psock->ingress_lock); sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED); sk_psock_cork_free(psock); - __sk_psock_zap_ingress(psock); spin_unlock_bh(&psock->ingress_lock); } @@ -823,7 +811,8 @@ static void sk_psock_destroy(struct work_struct *work) sk_psock_done_strp(psock); - cancel_work_sync(&psock->work); + cancel_delayed_work_sync(&psock->work); + __sk_psock_zap_ingress(psock); mutex_destroy(&psock->work_mutex); psock_progs_drop(&psock->progs); @@ -938,7 +927,7 @@ static int sk_psock_skb_redirect(struct sk_psock *from, struct sk_buff *skb) } skb_queue_tail(&psock_other->ingress_skb, skb); - schedule_work(&psock_other->work); + schedule_delayed_work(&psock_other->work, 0); spin_unlock_bh(&psock_other->ingress_lock); return 0; } @@ -990,10 +979,8 @@ static int sk_psock_verdict_apply(struct sk_psock *psock, struct sk_buff *skb, err = -EIO; sk_other = psock->sk; if (sock_flag(sk_other, SOCK_DEAD) || - !sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) { - skb_bpf_redirect_clear(skb); + !sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) goto out_free; - } skb_bpf_set_ingress(skb); @@ -1018,22 +1005,23 @@ static int sk_psock_verdict_apply(struct sk_psock *psock, struct sk_buff *skb, spin_lock_bh(&psock->ingress_lock); if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) { skb_queue_tail(&psock->ingress_skb, skb); - schedule_work(&psock->work); + schedule_delayed_work(&psock->work, 0); err = 0; } spin_unlock_bh(&psock->ingress_lock); - if (err < 0) { - skb_bpf_redirect_clear(skb); + if (err < 0) goto out_free; - } } break; case __SK_REDIRECT: + tcp_eat_skb(psock->sk, skb); err = sk_psock_skb_redirect(psock, skb); break; case __SK_DROP: default: out_free: + skb_bpf_redirect_clear(skb); + tcp_eat_skb(psock->sk, skb); sock_drop(psock->sk, skb); } @@ -1049,7 +1037,7 @@ static void sk_psock_write_space(struct sock *sk) psock = sk_psock(sk); if (likely(psock)) { if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) - schedule_work(&psock->work); + schedule_delayed_work(&psock->work, 0); write_space = psock->saved_write_space; } rcu_read_unlock(); @@ -1078,8 +1066,7 @@ static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb) skb_dst_drop(skb); skb_bpf_redirect_clear(skb); ret = bpf_prog_run_pin_on_cpu(prog, skb); - if (ret == SK_PASS) - skb_bpf_set_strparser(skb); + skb_bpf_set_strparser(skb); ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb)); skb->sk = NULL; } @@ -1183,12 +1170,11 @@ static int sk_psock_verdict_recv(struct sock *sk, 
struct sk_buff *skb) int ret = __SK_DROP; int len = skb->len; - skb_get(skb); - rcu_read_lock(); psock = sk_psock(sk); if (unlikely(!psock)) { len = 0; + tcp_eat_skb(sk, skb); sock_drop(sk, skb); goto out; } @@ -1212,12 +1198,21 @@ out: static void sk_psock_verdict_data_ready(struct sock *sk) { struct socket *sock = sk->sk_socket; + int copied; trace_sk_data_ready(sk); if (unlikely(!sock || !sock->ops || !sock->ops->read_skb)) return; - sock->ops->read_skb(sk, sk_psock_verdict_recv); + copied = sock->ops->read_skb(sk, sk_psock_verdict_recv); + if (copied >= 0) { + struct sk_psock *psock; + + rcu_read_lock(); + psock = sk_psock(sk); + psock->saved_data_ready(sk); + rcu_read_unlock(); + } } void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock) diff --git a/net/core/sock_map.c b/net/core/sock_map.c index 7c189c2e2fbf..00afb66cd095 100644 --- a/net/core/sock_map.c +++ b/net/core/sock_map.c @@ -1644,9 +1644,10 @@ void sock_map_close(struct sock *sk, long timeout) rcu_read_unlock(); sk_psock_stop(psock); release_sock(sk); - cancel_work_sync(&psock->work); + cancel_delayed_work_sync(&psock->work); sk_psock_put(sk, psock); } + /* Make sure we do not recurse. This is a bug. * Leak the socket instead of crashing on a stack overflow. */ diff --git a/net/core/stream.c b/net/core/stream.c index 434446ab14c5..f5c4e47df165 100644 --- a/net/core/stream.c +++ b/net/core/stream.c @@ -73,8 +73,8 @@ int sk_stream_wait_connect(struct sock *sk, long *timeo_p) add_wait_queue(sk_sleep(sk), &wait); sk->sk_write_pending++; done = sk_wait_event(sk, timeo_p, - !sk->sk_err && - !((1 << sk->sk_state) & + !READ_ONCE(sk->sk_err) && + !((1 << READ_ONCE(sk->sk_state)) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)), &wait); remove_wait_queue(sk_sleep(sk), &wait); sk->sk_write_pending--; @@ -87,9 +87,9 @@ EXPORT_SYMBOL(sk_stream_wait_connect); * sk_stream_closing - Return 1 if we still have things to send in our buffers. 
* @sk: socket to verify */ -static inline int sk_stream_closing(struct sock *sk) +static int sk_stream_closing(const struct sock *sk) { - return (1 << sk->sk_state) & + return (1 << READ_ONCE(sk->sk_state)) & (TCPF_FIN_WAIT1 | TCPF_CLOSING | TCPF_LAST_ACK); } @@ -142,8 +142,8 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p) set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); sk->sk_write_pending++; - sk_wait_event(sk, &current_timeo, sk->sk_err || - (sk->sk_shutdown & SEND_SHUTDOWN) || + sk_wait_event(sk, &current_timeo, READ_ONCE(sk->sk_err) || + (READ_ONCE(sk->sk_shutdown) & SEND_SHUTDOWN) || (sk_stream_memory_free(sk) && !vm_wait), &wait); sk->sk_write_pending--; diff --git a/net/devlink/core.c b/net/devlink/core.c index 777b091ef74d..c23ebabadc52 100644 --- a/net/devlink/core.c +++ b/net/devlink/core.c @@ -204,11 +204,6 @@ struct devlink *devlink_alloc_ns(const struct devlink_ops *ops, if (ret < 0) goto err_xa_alloc; - devlink->netdevice_nb.notifier_call = devlink_port_netdevice_event; - ret = register_netdevice_notifier(&devlink->netdevice_nb); - if (ret) - goto err_register_netdevice_notifier; - devlink->dev = dev; devlink->ops = ops; xa_init_flags(&devlink->ports, XA_FLAGS_ALLOC); @@ -233,8 +228,6 @@ struct devlink *devlink_alloc_ns(const struct devlink_ops *ops, return devlink; -err_register_netdevice_notifier: - xa_erase(&devlinks, devlink->index); err_xa_alloc: kfree(devlink); return NULL; @@ -266,8 +259,6 @@ void devlink_free(struct devlink *devlink) xa_destroy(&devlink->params); xa_destroy(&devlink->ports); - WARN_ON_ONCE(unregister_netdevice_notifier(&devlink->netdevice_nb)); - xa_erase(&devlinks, devlink->index); devlink_put(devlink); @@ -303,6 +294,10 @@ static struct pernet_operations devlink_pernet_ops __net_initdata = { .pre_exit = devlink_pernet_pre_exit, }; +static struct notifier_block devlink_port_netdevice_nb = { + .notifier_call = devlink_port_netdevice_event, +}; + static int __init devlink_init(void) { int err; @@ -311,6 +306,9 @@ static int __init devlink_init(void) if (err) goto out; err = register_pernet_subsys(&devlink_pernet_ops); + if (err) + goto out; + err = register_netdevice_notifier(&devlink_port_netdevice_nb); out: WARN_ON(err); diff --git a/net/devlink/devl_internal.h b/net/devlink/devl_internal.h index e133f423294a..62921b2eb0d3 100644 --- a/net/devlink/devl_internal.h +++ b/net/devlink/devl_internal.h @@ -50,7 +50,6 @@ struct devlink { u8 reload_failed:1; refcount_t refcount; struct rcu_work rwork; - struct notifier_block netdevice_nb; char priv[] __aligned(NETDEV_ALIGN); }; diff --git a/net/devlink/leftover.c b/net/devlink/leftover.c index dffca2f9bfa7..cd0254968076 100644 --- a/net/devlink/leftover.c +++ b/net/devlink/leftover.c @@ -7073,10 +7073,9 @@ int devlink_port_netdevice_event(struct notifier_block *nb, struct devlink_port *devlink_port = netdev->devlink_port; struct devlink *devlink; - devlink = container_of(nb, struct devlink, netdevice_nb); - - if (!devlink_port || devlink_port->devlink != devlink) + if (!devlink_port) return NOTIFY_OK; + devlink = devlink_port->devlink; switch (event) { case NETDEV_POST_INIT: diff --git a/net/handshake/handshake-test.c b/net/handshake/handshake-test.c index e6adc5dec11a..6d37bab35c8f 100644 --- a/net/handshake/handshake-test.c +++ b/net/handshake/handshake-test.c @@ -102,7 +102,7 @@ struct handshake_req_alloc_test_param handshake_req_alloc_params[] = { { .desc = "handshake_req_alloc excessive privsize", .proto = &handshake_req_alloc_proto_6, - .gfp = GFP_KERNEL, + .gfp = GFP_KERNEL | __GFP_NOWARN,
.expect_success = false, }, { @@ -209,6 +209,7 @@ static void handshake_req_submit_test4(struct kunit *test) { struct handshake_req *req, *result; struct socket *sock; + struct file *filp; int err; /* Arrange */ @@ -218,9 +219,10 @@ static void handshake_req_submit_test4(struct kunit *test) err = __sock_create(&init_net, PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock, 1); KUNIT_ASSERT_EQ(test, err, 0); - sock->file = sock_alloc_file(sock, O_NONBLOCK, NULL); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sock->file); + filp = sock_alloc_file(sock, O_NONBLOCK, NULL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filp); KUNIT_ASSERT_NOT_NULL(test, sock->sk); + sock->file = filp; err = handshake_req_submit(sock, req, GFP_KERNEL); KUNIT_ASSERT_EQ(test, err, 0); @@ -241,6 +243,7 @@ static void handshake_req_submit_test5(struct kunit *test) struct handshake_req *req; struct handshake_net *hn; struct socket *sock; + struct file *filp; struct net *net; int saved, err; @@ -251,9 +254,10 @@ static void handshake_req_submit_test5(struct kunit *test) err = __sock_create(&init_net, PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock, 1); KUNIT_ASSERT_EQ(test, err, 0); - sock->file = sock_alloc_file(sock, O_NONBLOCK, NULL); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sock->file); + filp = sock_alloc_file(sock, O_NONBLOCK, NULL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filp); KUNIT_ASSERT_NOT_NULL(test, sock->sk); + sock->file = filp; net = sock_net(sock->sk); hn = handshake_pernet(net); @@ -276,6 +280,7 @@ static void handshake_req_submit_test6(struct kunit *test) { struct handshake_req *req1, *req2; struct socket *sock; + struct file *filp; int err; /* Arrange */ @@ -287,9 +292,10 @@ static void handshake_req_submit_test6(struct kunit *test) err = __sock_create(&init_net, PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock, 1); KUNIT_ASSERT_EQ(test, err, 0); - sock->file = sock_alloc_file(sock, O_NONBLOCK, NULL); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sock->file); + filp = sock_alloc_file(sock, O_NONBLOCK, NULL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filp); KUNIT_ASSERT_NOT_NULL(test, sock->sk); + sock->file = filp; /* Act */ err = handshake_req_submit(sock, req1, GFP_KERNEL); @@ -307,6 +313,7 @@ static void handshake_req_cancel_test1(struct kunit *test) { struct handshake_req *req; struct socket *sock; + struct file *filp; bool result; int err; @@ -318,8 +325,9 @@ static void handshake_req_cancel_test1(struct kunit *test) &sock, 1); KUNIT_ASSERT_EQ(test, err, 0); - sock->file = sock_alloc_file(sock, O_NONBLOCK, NULL); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sock->file); + filp = sock_alloc_file(sock, O_NONBLOCK, NULL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filp); + sock->file = filp; err = handshake_req_submit(sock, req, GFP_KERNEL); KUNIT_ASSERT_EQ(test, err, 0); @@ -340,6 +348,7 @@ static void handshake_req_cancel_test2(struct kunit *test) struct handshake_req *req, *next; struct handshake_net *hn; struct socket *sock; + struct file *filp; struct net *net; bool result; int err; @@ -352,8 +361,9 @@ static void handshake_req_cancel_test2(struct kunit *test) &sock, 1); KUNIT_ASSERT_EQ(test, err, 0); - sock->file = sock_alloc_file(sock, O_NONBLOCK, NULL); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sock->file); + filp = sock_alloc_file(sock, O_NONBLOCK, NULL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filp); + sock->file = filp; err = handshake_req_submit(sock, req, GFP_KERNEL); KUNIT_ASSERT_EQ(test, err, 0); @@ -380,6 +390,7 @@ static void handshake_req_cancel_test3(struct kunit *test) struct handshake_req *req, *next; struct handshake_net *hn; struct socket *sock; + struct 
file *filp; struct net *net; bool result; int err; @@ -392,8 +403,9 @@ static void handshake_req_cancel_test3(struct kunit *test) &sock, 1); KUNIT_ASSERT_EQ(test, err, 0); - sock->file = sock_alloc_file(sock, O_NONBLOCK, NULL); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sock->file); + filp = sock_alloc_file(sock, O_NONBLOCK, NULL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filp); + sock->file = filp; err = handshake_req_submit(sock, req, GFP_KERNEL); KUNIT_ASSERT_EQ(test, err, 0); @@ -436,6 +448,7 @@ static void handshake_req_destroy_test1(struct kunit *test) { struct handshake_req *req; struct socket *sock; + struct file *filp; int err; /* Arrange */ @@ -448,8 +461,9 @@ static void handshake_req_destroy_test1(struct kunit *test) &sock, 1); KUNIT_ASSERT_EQ(test, err, 0); - sock->file = sock_alloc_file(sock, O_NONBLOCK, NULL); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sock->file); + filp = sock_alloc_file(sock, O_NONBLOCK, NULL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filp); + sock->file = filp; err = handshake_req_submit(sock, req, GFP_KERNEL); KUNIT_ASSERT_EQ(test, err, 0); diff --git a/net/handshake/handshake.h b/net/handshake/handshake.h index 4dac965c99df..8aeaadca844f 100644 --- a/net/handshake/handshake.h +++ b/net/handshake/handshake.h @@ -31,6 +31,7 @@ struct handshake_req { struct list_head hr_list; struct rhash_head hr_rhash; unsigned long hr_flags; + struct file *hr_file; const struct handshake_proto *hr_proto; struct sock *hr_sk; void (*hr_odestruct)(struct sock *sk); diff --git a/net/handshake/netlink.c b/net/handshake/netlink.c index 35c9c445e0b8..1086653e1fad 100644 --- a/net/handshake/netlink.c +++ b/net/handshake/netlink.c @@ -48,7 +48,7 @@ int handshake_genl_notify(struct net *net, const struct handshake_proto *proto, proto->hp_handler_class)) return -ESRCH; - msg = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL); + msg = genlmsg_new(GENLMSG_DEFAULT_SIZE, flags); if (!msg) return -ENOMEM; @@ -99,9 +99,6 @@ static int handshake_dup(struct socket *sock) struct file *file; int newfd; - if (!sock->file) - return -EBADF; - file = get_file(sock->file); newfd = get_unused_fd_flags(O_CLOEXEC); if (newfd < 0) { @@ -142,15 +139,16 @@ int handshake_nl_accept_doit(struct sk_buff *skb, struct genl_info *info) goto out_complete; } err = req->hr_proto->hp_accept(req, info, fd); - if (err) + if (err) { + fput(sock->file); goto out_complete; + } trace_handshake_cmd_accept(net, req, req->hr_sk, fd); return 0; out_complete: handshake_complete(req, -EIO, NULL); - fput(sock->file); out_status: trace_handshake_cmd_accept_err(net, req, NULL, err); return err; @@ -159,8 +157,8 @@ out_status: int handshake_nl_done_doit(struct sk_buff *skb, struct genl_info *info) { struct net *net = sock_net(skb->sk); + struct handshake_req *req = NULL; struct socket *sock = NULL; - struct handshake_req *req; int fd, status, err; if (GENL_REQ_ATTR_CHECK(info, HANDSHAKE_A_DONE_SOCKFD)) diff --git a/net/handshake/request.c b/net/handshake/request.c index 94d5cef3e048..d78d41abb3d9 100644 --- a/net/handshake/request.c +++ b/net/handshake/request.c @@ -239,6 +239,7 @@ int handshake_req_submit(struct socket *sock, struct handshake_req *req, } req->hr_odestruct = req->hr_sk->sk_destruct; req->hr_sk->sk_destruct = handshake_sk_destruct; + req->hr_file = sock->file; ret = -EOPNOTSUPP; net = sock_net(req->hr_sk); @@ -334,6 +335,9 @@ bool handshake_req_cancel(struct sock *sk) return false; } + /* Request accepted and waiting for DONE */ + fput(req->hr_file); + out_true: trace_handshake_cancel(net, req, sk); diff --git a/net/handshake/tlshd.c 
b/net/handshake/tlshd.c index fcbeb63b4eb1..b735f5cced2f 100644 --- a/net/handshake/tlshd.c +++ b/net/handshake/tlshd.c @@ -31,6 +31,7 @@ struct tls_handshake_req { int th_type; unsigned int th_timeout_ms; int th_auth_mode; + const char *th_peername; key_serial_t th_keyring; key_serial_t th_certificate; key_serial_t th_privkey; @@ -48,6 +49,7 @@ tls_handshake_req_init(struct handshake_req *req, treq->th_timeout_ms = args->ta_timeout_ms; treq->th_consumer_done = args->ta_done; treq->th_consumer_data = args->ta_data; + treq->th_peername = args->ta_peername; treq->th_keyring = args->ta_keyring; treq->th_num_peerids = 0; treq->th_certificate = TLS_NO_CERT; @@ -214,6 +216,12 @@ static int tls_handshake_accept(struct handshake_req *req, ret = nla_put_u32(msg, HANDSHAKE_A_ACCEPT_MESSAGE_TYPE, treq->th_type); if (ret < 0) goto out_cancel; + if (treq->th_peername) { + ret = nla_put_string(msg, HANDSHAKE_A_ACCEPT_PEERNAME, + treq->th_peername); + if (ret < 0) + goto out_cancel; + } if (treq->th_timeout_ms) { ret = nla_put_u32(msg, HANDSHAKE_A_ACCEPT_TIMEOUT, treq->th_timeout_ms); if (ret < 0) diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 940062e08f57..c4aab3aacbd8 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -894,7 +894,7 @@ int inet_shutdown(struct socket *sock, int how) EPOLLHUP, even on eg. unconnected UDP sockets -- RR */ fallthrough; default: - sk->sk_shutdown |= how; + WRITE_ONCE(sk->sk_shutdown, sk->sk_shutdown | how); if (sk->sk_prot->shutdown) sk->sk_prot->shutdown(sk, how); break; diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index b511ff0adc0a..8e97d8d4cc9d 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -317,7 +317,14 @@ int ip_cmsg_send(struct sock *sk, struct msghdr *msg, struct ipcm_cookie *ipc, ipc->tos = val; ipc->priority = rt_tos2priority(ipc->tos); break; - + case IP_PROTOCOL: + if (cmsg->cmsg_len != CMSG_LEN(sizeof(int))) + return -EINVAL; + val = *(int *)CMSG_DATA(cmsg); + if (val < 1 || val > 255) + return -EINVAL; + ipc->protocol = val; + break; default: return -EINVAL; } @@ -1761,6 +1768,9 @@ int do_ip_getsockopt(struct sock *sk, int level, int optname, case IP_LOCAL_PORT_RANGE: val = inet->local_port_range.hi << 16 | inet->local_port_range.lo; break; + case IP_PROTOCOL: + val = inet_sk(sk)->inet_num; + break; default: sockopt_release_sock(sk); return -ENOPROTOOPT; diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index ff712bf2a98d..eadf1c9ef7e4 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c @@ -532,6 +532,9 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) } ipcm_init_sk(&ipc, inet); + /* Keep backward compat */ + if (hdrincl) + ipc.protocol = IPPROTO_RAW; if (msg->msg_controllen) { err = ip_cmsg_send(sk, msg, &ipc, false); @@ -599,7 +602,7 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) flowi4_init_output(&fl4, ipc.oif, ipc.sockc.mark, tos, RT_SCOPE_UNIVERSE, - hdrincl ? IPPROTO_RAW : sk->sk_protocol, + hdrincl ? ipc.protocol : sk->sk_protocol, inet_sk_flowi_flags(sk) | (hdrincl ? 
FLOWI_FLAG_KNOWN_NH : 0), daddr, saddr, 0, 0, sk->sk_uid); diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 20db115c38c4..a60f6f4e7cd9 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -498,6 +498,7 @@ __poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait) __poll_t mask; struct sock *sk = sock->sk; const struct tcp_sock *tp = tcp_sk(sk); + u8 shutdown; int state; sock_poll_wait(file, sock, wait); @@ -540,9 +541,10 @@ __poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait) * NOTE. Check for TCP_CLOSE is added. The goal is to prevent * blocking on fresh not-connected or disconnected socket. --ANK */ - if (sk->sk_shutdown == SHUTDOWN_MASK || state == TCP_CLOSE) + shutdown = READ_ONCE(sk->sk_shutdown); + if (shutdown == SHUTDOWN_MASK || state == TCP_CLOSE) mask |= EPOLLHUP; - if (sk->sk_shutdown & RCV_SHUTDOWN) + if (shutdown & RCV_SHUTDOWN) mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP; /* Connected or passive Fast Open socket? */ @@ -559,7 +561,7 @@ __poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait) if (tcp_stream_is_readable(sk, target)) mask |= EPOLLIN | EPOLLRDNORM; - if (!(sk->sk_shutdown & SEND_SHUTDOWN)) { + if (!(shutdown & SEND_SHUTDOWN)) { if (__sk_stream_is_writeable(sk, 1)) { mask |= EPOLLOUT | EPOLLWRNORM; } else { /* send SIGIO later */ @@ -1569,7 +1571,7 @@ static int tcp_peek_sndq(struct sock *sk, struct msghdr *msg, int len) * calculation of whether or not we must ACK for the sake of * a window update. */ -static void __tcp_cleanup_rbuf(struct sock *sk, int copied) +void __tcp_cleanup_rbuf(struct sock *sk, int copied) { struct tcp_sock *tp = tcp_sk(sk); bool time_to_ack = false; @@ -1771,7 +1773,6 @@ int tcp_read_skb(struct sock *sk, skb_read_actor_t recv_actor) WARN_ON_ONCE(!skb_set_owner_sk_safe(skb, sk)); tcp_flags = TCP_SKB_CB(skb)->tcp_flags; used = recv_actor(sk, skb); - consume_skb(skb); if (used < 0) { if (!copied) copied = used; @@ -1785,14 +1786,6 @@ int tcp_read_skb(struct sock *sk, skb_read_actor_t recv_actor) break; } } - WRITE_ONCE(tp->copied_seq, seq); - - tcp_rcv_space_adjust(sk); - - /* Clean up data we have read: This will do ACK frames. 
*/ - if (copied > 0) - __tcp_cleanup_rbuf(sk, copied); - return copied; } EXPORT_SYMBOL(tcp_read_skb); @@ -2867,7 +2860,7 @@ void __tcp_close(struct sock *sk, long timeout) int data_was_unread = 0; int state; - sk->sk_shutdown = SHUTDOWN_MASK; + WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK); if (sk->sk_state == TCP_LISTEN) { tcp_set_state(sk, TCP_CLOSE); @@ -3119,7 +3112,7 @@ int tcp_disconnect(struct sock *sk, int flags) inet_bhash2_reset_saddr(sk); - sk->sk_shutdown = 0; + WRITE_ONCE(sk->sk_shutdown, 0); sock_reset_flag(sk, SOCK_DONE); tp->srtt_us = 0; tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT); @@ -4649,7 +4642,7 @@ void tcp_done(struct sock *sk) if (req) reqsk_fastopen_remove(sk, req, false); - sk->sk_shutdown = SHUTDOWN_MASK; + WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK); if (!sock_flag(sk, SOCK_DEAD)) sk->sk_state_change(sk); diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c index ebf917511937..5f93918c063c 100644 --- a/net/ipv4/tcp_bpf.c +++ b/net/ipv4/tcp_bpf.c @@ -11,6 +11,24 @@ #include <net/inet_common.h> #include <net/tls.h> +void tcp_eat_skb(struct sock *sk, struct sk_buff *skb) +{ + struct tcp_sock *tcp; + int copied; + + if (!skb || !skb->len || !sk_is_tcp(sk)) + return; + + if (skb_bpf_strparser(skb)) + return; + + tcp = tcp_sk(sk); + copied = tcp->copied_seq + skb->len; + WRITE_ONCE(tcp->copied_seq, copied); + tcp_rcv_space_adjust(sk); + __tcp_cleanup_rbuf(sk, skb->len); +} + static int bpf_tcp_ingress(struct sock *sk, struct sk_psock *psock, struct sk_msg *msg, u32 apply_bytes, int flags) { @@ -168,20 +186,40 @@ static int tcp_msg_wait_data(struct sock *sk, struct sk_psock *psock, sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); ret = sk_wait_event(sk, &timeo, !list_empty(&psock->ingress_msg) || - !skb_queue_empty(&sk->sk_receive_queue), &wait); + !skb_queue_empty_lockless(&sk->sk_receive_queue), &wait); sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); remove_wait_queue(sk_sleep(sk), &wait); return ret; } +static bool is_next_msg_fin(struct sk_psock *psock) +{ + struct scatterlist *sge; + struct sk_msg *msg_rx; + int i; + + msg_rx = sk_psock_peek_msg(psock); + i = msg_rx->sg.start; + sge = sk_msg_elem(msg_rx, i); + if (!sge->length) { + struct sk_buff *skb = msg_rx->skb; + + if (skb && TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) + return true; + } + return false; +} + static int tcp_bpf_recvmsg_parser(struct sock *sk, struct msghdr *msg, size_t len, int flags, int *addr_len) { + struct tcp_sock *tcp = tcp_sk(sk); + u32 seq = tcp->copied_seq; struct sk_psock *psock; - int copied; + int copied = 0; if (unlikely(flags & MSG_ERRQUEUE)) return inet_recv_error(sk, msg, len, addr_len); @@ -194,8 +232,43 @@ static int tcp_bpf_recvmsg_parser(struct sock *sk, return tcp_recvmsg(sk, msg, len, flags, addr_len); lock_sock(sk); + + /* We may have received data on the sk_receive_queue pre-accept and + * then we can not use read_skb in this context because we haven't + * assigned a sk_socket yet so have no link to the ops. The work-around + * is to check the sk_receive_queue and in these cases read skbs off + * queue again. The read_skb hook is not running at this point because + * of lock_sock so we avoid having multiple runners in read_skb. + */ + if (unlikely(!skb_queue_empty(&sk->sk_receive_queue))) { + tcp_data_ready(sk); + /* This handles the ENOMEM errors if we both receive data + * pre accept and are already under memory pressure. At least + * let user know to retry. 
+ */ + if (unlikely(!skb_queue_empty(&sk->sk_receive_queue))) { + copied = -EAGAIN; + goto out; + } + } + msg_bytes_ready: copied = sk_msg_recvmsg(sk, psock, msg, len, flags); + /* The typical case for EFAULT is the socket was gracefully + * shutdown with a FIN pkt. So check here the other case is + * some error on copy_page_to_iter which would be unexpected. + * On fin return correct return code to zero. + */ + if (copied == -EFAULT) { + bool is_fin = is_next_msg_fin(psock); + + if (is_fin) { + copied = 0; + seq++; + goto out; + } + } + seq += copied; if (!copied) { long timeo; int data; @@ -233,6 +306,10 @@ msg_bytes_ready: copied = -EAGAIN; } out: + WRITE_ONCE(tcp->copied_seq, seq); + tcp_rcv_space_adjust(sk); + if (copied > 0) + __tcp_cleanup_rbuf(sk, copied); release_sock(sk); sk_psock_put(sk, psock); return copied; diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index a057330d6f59..61b6710f337a 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -4362,7 +4362,7 @@ void tcp_fin(struct sock *sk) inet_csk_schedule_ack(sk); - sk->sk_shutdown |= RCV_SHUTDOWN; + WRITE_ONCE(sk->sk_shutdown, sk->sk_shutdown | RCV_SHUTDOWN); sock_set_flag(sk, SOCK_DONE); switch (sk->sk_state) { @@ -6599,7 +6599,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb) break; tcp_set_state(sk, TCP_FIN_WAIT2); - sk->sk_shutdown |= SEND_SHUTDOWN; + WRITE_ONCE(sk->sk_shutdown, sk->sk_shutdown | SEND_SHUTDOWN); sk_dst_confirm(sk); diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 39bda2b1066e..06d2573685ca 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -829,6 +829,9 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb) inet_twsk(sk)->tw_priority : sk->sk_priority; transmit_time = tcp_transmit_time(sk); xfrm_sk_clone_policy(ctl_sk, sk); + } else { + ctl_sk->sk_mark = 0; + ctl_sk->sk_priority = 0; } ip_send_unicast_reply(ctl_sk, skb, &TCP_SKB_CB(skb)->header.h4.opt, @@ -836,7 +839,6 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb) &arg, arg.iov[0].iov_len, transmit_time); - ctl_sk->sk_mark = 0; xfrm_sk_free_policy(ctl_sk); sock_net_set(ctl_sk, &init_net); __TCP_INC_STATS(net, TCP_MIB_OUTSEGS); @@ -935,7 +937,6 @@ static void tcp_v4_send_ack(const struct sock *sk, &arg, arg.iov[0].iov_len, transmit_time); - ctl_sk->sk_mark = 0; sock_net_set(ctl_sk, &init_net); __TCP_INC_STATS(net, TCP_MIB_OUTSEGS); local_bh_enable(); diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index aa32afd871ee..9482def1f310 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -1818,7 +1818,7 @@ EXPORT_SYMBOL(__skb_recv_udp); int udp_read_skb(struct sock *sk, skb_read_actor_t recv_actor) { struct sk_buff *skb; - int err, copied; + int err; try_again: skb = skb_recv_udp(sk, MSG_DONTWAIT, &err); @@ -1837,10 +1837,7 @@ try_again: } WARN_ON_ONCE(!skb_set_owner_sk_safe(skb, sk)); - copied = recv_actor(sk, skb); - kfree_skb(skb); - - return copied; + return recv_actor(sk, skb); } EXPORT_SYMBOL(udp_read_skb); diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c index e0c9cc39b81e..56d94d23b9e0 100644 --- a/net/ipv4/udplite.c +++ b/net/ipv4/udplite.c @@ -64,6 +64,8 @@ struct proto udplite_prot = { .per_cpu_fw_alloc = &udp_memory_per_cpu_fw_alloc, .sysctl_mem = sysctl_udp_mem, + .sysctl_wmem_offset = offsetof(struct net, ipv4.sysctl_udp_wmem_min), + .sysctl_rmem_offset = offsetof(struct net, ipv4.sysctl_udp_rmem_min), .obj_size = sizeof(struct udp_sock), .h.udp_table = &udplite_table, }; diff --git a/net/ipv6/exthdrs_core.c b/net/ipv6/exthdrs_core.c 
index da46c4284676..49e31e4ae7b7 100644 --- a/net/ipv6/exthdrs_core.c +++ b/net/ipv6/exthdrs_core.c @@ -143,6 +143,8 @@ int ipv6_find_tlv(const struct sk_buff *skb, int offset, int type) optlen = 1; break; default: + if (len < 2) + goto bad; optlen = nh[offset + 1] + 2; if (optlen > len) goto bad; diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index 2438da5ff6da..bac768d36cc1 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@ -2491,7 +2491,7 @@ static int ipv6_route_native_seq_show(struct seq_file *seq, void *v) const struct net_device *dev; if (rt->nh) - fib6_nh = nexthop_fib6_nh_bh(rt->nh); + fib6_nh = nexthop_fib6_nh(rt->nh); seq_printf(seq, "%pi6 %02x ", &rt->fib6_dst.addr, rt->fib6_dst.plen); @@ -2556,14 +2556,14 @@ static struct fib6_table *ipv6_route_seq_next_table(struct fib6_table *tbl, if (tbl) { h = (tbl->tb6_id & (FIB6_TABLE_HASHSZ - 1)) + 1; - node = rcu_dereference_bh(hlist_next_rcu(&tbl->tb6_hlist)); + node = rcu_dereference(hlist_next_rcu(&tbl->tb6_hlist)); } else { h = 0; node = NULL; } while (!node && h < FIB6_TABLE_HASHSZ) { - node = rcu_dereference_bh( + node = rcu_dereference( hlist_first_rcu(&net->ipv6.fib_table_hash[h++])); } return hlist_entry_safe(node, struct fib6_table, tb6_hlist); @@ -2593,7 +2593,7 @@ static void *ipv6_route_seq_next(struct seq_file *seq, void *v, loff_t *pos) if (!v) goto iter_table; - n = rcu_dereference_bh(((struct fib6_info *)v)->fib6_next); + n = rcu_dereference(((struct fib6_info *)v)->fib6_next); if (n) return n; @@ -2619,12 +2619,12 @@ iter_table: } static void *ipv6_route_seq_start(struct seq_file *seq, loff_t *pos) - __acquires(RCU_BH) + __acquires(RCU) { struct net *net = seq_file_net(seq); struct ipv6_route_iter *iter = seq->private; - rcu_read_lock_bh(); + rcu_read_lock(); iter->tbl = ipv6_route_seq_next_table(NULL, net); iter->skip = *pos; @@ -2645,7 +2645,7 @@ static bool ipv6_route_iter_active(struct ipv6_route_iter *iter) } static void ipv6_route_native_seq_stop(struct seq_file *seq, void *v) - __releases(RCU_BH) + __releases(RCU) { struct net *net = seq_file_net(seq); struct ipv6_route_iter *iter = seq->private; @@ -2653,7 +2653,7 @@ static void ipv6_route_native_seq_stop(struct seq_file *seq, void *v) if (ipv6_route_iter_active(iter)) fib6_walker_unlink(net, &iter->w); - rcu_read_unlock_bh(); + rcu_read_unlock(); } #if IS_BUILTIN(CONFIG_IPV6) && defined(CONFIG_BPF_SYSCALL) diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index a4ecfc9d2593..da80974ad23a 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -1015,12 +1015,14 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb, ntohl(tun_id), ntohl(md->u.index), truncate, false); + proto = htons(ETH_P_ERSPAN); } else if (md->version == 2) { erspan_build_header_v2(skb, ntohl(tun_id), md->u.md2.dir, get_hwid(&md->u.md2), truncate, false); + proto = htons(ETH_P_ERSPAN2); } else { goto tx_err; } @@ -1043,24 +1045,25 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb, break; } - if (t->parms.erspan_ver == 1) + if (t->parms.erspan_ver == 1) { erspan_build_header(skb, ntohl(t->parms.o_key), t->parms.index, truncate, false); - else if (t->parms.erspan_ver == 2) + proto = htons(ETH_P_ERSPAN); + } else if (t->parms.erspan_ver == 2) { erspan_build_header_v2(skb, ntohl(t->parms.o_key), t->parms.dir, t->parms.hwid, truncate, false); - else + proto = htons(ETH_P_ERSPAN2); + } else { goto tx_err; + } fl6.daddr = t->parms.raddr; } /* Push GRE header. */ - proto = (t->parms.erspan_ver == 1) ? 
htons(ETH_P_ERSPAN) - : htons(ETH_P_ERSPAN2); gre_build_header(skb, 8, TUNNEL_SEQ, proto, 0, htonl(atomic_fetch_inc(&t->o_seqno))); /* TooBig packet may have updated dst->dev's mtu */ diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index 7d0adb612bdd..44ee7a2e72ac 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -793,7 +793,8 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) if (!proto) proto = inet->inet_num; - else if (proto != inet->inet_num) + else if (proto != inet->inet_num && + inet->inet_num != IPPROTO_RAW) return -EINVAL; if (proto > 255) diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c index 67eaf3ca14ce..3bab0cc13697 100644 --- a/net/ipv6/udplite.c +++ b/net/ipv6/udplite.c @@ -60,6 +60,8 @@ struct proto udplitev6_prot = { .per_cpu_fw_alloc = &udp_memory_per_cpu_fw_alloc, .sysctl_mem = sysctl_udp_mem, + .sysctl_wmem_offset = offsetof(struct net, ipv4.sysctl_udp_wmem_min), + .sysctl_rmem_offset = offsetof(struct net, ipv4.sysctl_udp_rmem_min), .obj_size = sizeof(struct udp6_sock), .h.udp_table = &udplite_table, }; diff --git a/net/key/af_key.c b/net/key/af_key.c index a815f5ab4c49..31ab12fd720a 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -1940,7 +1940,8 @@ static u32 gen_reqid(struct net *net) } static int -parse_ipsecrequest(struct xfrm_policy *xp, struct sadb_x_ipsecrequest *rq) +parse_ipsecrequest(struct xfrm_policy *xp, struct sadb_x_policy *pol, + struct sadb_x_ipsecrequest *rq) { struct net *net = xp_net(xp); struct xfrm_tmpl *t = xp->xfrm_vec + xp->xfrm_nr; @@ -1958,9 +1959,12 @@ parse_ipsecrequest(struct xfrm_policy *xp, struct sadb_x_ipsecrequest *rq) if ((mode = pfkey_mode_to_xfrm(rq->sadb_x_ipsecrequest_mode)) < 0) return -EINVAL; t->mode = mode; - if (rq->sadb_x_ipsecrequest_level == IPSEC_LEVEL_USE) + if (rq->sadb_x_ipsecrequest_level == IPSEC_LEVEL_USE) { + if ((mode == XFRM_MODE_TUNNEL || mode == XFRM_MODE_BEET) && + pol->sadb_x_policy_dir == IPSEC_DIR_OUTBOUND) + return -EINVAL; t->optional = 1; - else if (rq->sadb_x_ipsecrequest_level == IPSEC_LEVEL_UNIQUE) { + } else if (rq->sadb_x_ipsecrequest_level == IPSEC_LEVEL_UNIQUE) { t->reqid = rq->sadb_x_ipsecrequest_reqid; if (t->reqid > IPSEC_MANUAL_REQID_MAX) t->reqid = 0; @@ -2002,7 +2006,7 @@ parse_ipsecrequests(struct xfrm_policy *xp, struct sadb_x_policy *pol) rq->sadb_x_ipsecrequest_len < sizeof(*rq)) return -EINVAL; - if ((err = parse_ipsecrequest(xp, rq)) < 0) + if ((err = parse_ipsecrequest(xp, pol, rq)) < 0) return err; len -= rq->sadb_x_ipsecrequest_len; rq = (void*)((u8*)rq + rq->sadb_x_ipsecrequest_len); diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c index da7fe94bea2e..9ffbc667be6c 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c @@ -583,7 +583,8 @@ static int llc_ui_wait_for_disc(struct sock *sk, long timeout) add_wait_queue(sk_sleep(sk), &wait); while (1) { - if (sk_wait_event(sk, &timeout, sk->sk_state == TCP_CLOSE, &wait)) + if (sk_wait_event(sk, &timeout, + READ_ONCE(sk->sk_state) == TCP_CLOSE, &wait)) break; rc = -ERESTARTSYS; if (signal_pending(current)) @@ -603,7 +604,8 @@ static bool llc_ui_wait_for_conn(struct sock *sk, long timeout) add_wait_queue(sk_sleep(sk), &wait); while (1) { - if (sk_wait_event(sk, &timeout, sk->sk_state != TCP_SYN_SENT, &wait)) + if (sk_wait_event(sk, &timeout, + READ_ONCE(sk->sk_state) != TCP_SYN_SENT, &wait)) break; if (signal_pending(current) || !timeout) break; @@ -622,7 +624,7 @@ static int llc_ui_wait_for_busy_core(struct sock *sk, long timeout) while (1) { rc = 0; if (sk_wait_event(sk, &timeout, - (sk->sk_shutdown & 
RCV_SHUTDOWN) || + (READ_ONCE(sk->sk_shutdown) & RCV_SHUTDOWN) || (!llc_data_accept_state(llc->state) && !llc->remote_busy_flag && !llc->p_flag), &wait)) diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 7317e4a5d1ff..86b2036d73ff 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -1578,9 +1578,10 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev, sdata_dereference(link->u.ap.unsol_bcast_probe_resp, sdata); - /* abort any running channel switch */ + /* abort any running channel switch or color change */ mutex_lock(&local->mtx); link_conf->csa_active = false; + link_conf->color_change_active = false; if (link->csa_block_tx) { ieee80211_wake_vif_queues(local, sdata, IEEE80211_QUEUE_STOP_REASON_CSA); @@ -3589,7 +3590,7 @@ void ieee80211_channel_switch_disconnect(struct ieee80211_vif *vif, bool block_t EXPORT_SYMBOL(ieee80211_channel_switch_disconnect); static int ieee80211_set_after_csa_beacon(struct ieee80211_sub_if_data *sdata, - u32 *changed) + u64 *changed) { int err; @@ -3632,7 +3633,7 @@ static int ieee80211_set_after_csa_beacon(struct ieee80211_sub_if_data *sdata, static int __ieee80211_csa_finalize(struct ieee80211_sub_if_data *sdata) { struct ieee80211_local *local = sdata->local; - u32 changed = 0; + u64 changed = 0; int err; sdata_assert_lock(sdata); diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c index dbc34fbe7c8f..77c90ed8f5d7 100644 --- a/net/mac80211/chan.c +++ b/net/mac80211/chan.c @@ -258,7 +258,8 @@ ieee80211_get_max_required_bw(struct ieee80211_sub_if_data *sdata, static enum nl80211_chan_width ieee80211_get_chanctx_vif_max_required_bw(struct ieee80211_sub_if_data *sdata, - struct ieee80211_chanctx_conf *conf) + struct ieee80211_chanctx *ctx, + struct ieee80211_link_data *rsvd_for) { enum nl80211_chan_width max_bw = NL80211_CHAN_WIDTH_20_NOHT; struct ieee80211_vif *vif = &sdata->vif; @@ -267,13 +268,14 @@ ieee80211_get_chanctx_vif_max_required_bw(struct ieee80211_sub_if_data *sdata, rcu_read_lock(); for (link_id = 0; link_id < ARRAY_SIZE(sdata->link); link_id++) { enum nl80211_chan_width width = NL80211_CHAN_WIDTH_20_NOHT; - struct ieee80211_bss_conf *link_conf = - rcu_dereference(sdata->vif.link_conf[link_id]); + struct ieee80211_link_data *link = + rcu_dereference(sdata->link[link_id]); - if (!link_conf) + if (!link) continue; - if (rcu_access_pointer(link_conf->chanctx_conf) != conf) + if (link != rsvd_for && + rcu_access_pointer(link->conf->chanctx_conf) != &ctx->conf) continue; switch (vif->type) { @@ -287,7 +289,7 @@ ieee80211_get_chanctx_vif_max_required_bw(struct ieee80211_sub_if_data *sdata, * point, so take the width from the chandef, but * account also for TDLS peers */ - width = max(link_conf->chandef.width, + width = max(link->conf->chandef.width, ieee80211_get_max_required_bw(sdata, link_id)); break; case NL80211_IFTYPE_P2P_DEVICE: @@ -296,7 +298,7 @@ ieee80211_get_chanctx_vif_max_required_bw(struct ieee80211_sub_if_data *sdata, case NL80211_IFTYPE_ADHOC: case NL80211_IFTYPE_MESH_POINT: case NL80211_IFTYPE_OCB: - width = link_conf->chandef.width; + width = link->conf->chandef.width; break; case NL80211_IFTYPE_WDS: case NL80211_IFTYPE_UNSPECIFIED: @@ -316,7 +318,8 @@ ieee80211_get_chanctx_vif_max_required_bw(struct ieee80211_sub_if_data *sdata, static enum nl80211_chan_width ieee80211_get_chanctx_max_required_bw(struct ieee80211_local *local, - struct ieee80211_chanctx_conf *conf) + struct ieee80211_chanctx *ctx, + struct ieee80211_link_data *rsvd_for) { struct ieee80211_sub_if_data *sdata; enum 
nl80211_chan_width max_bw = NL80211_CHAN_WIDTH_20_NOHT; @@ -328,7 +331,8 @@ ieee80211_get_chanctx_max_required_bw(struct ieee80211_local *local, if (!ieee80211_sdata_running(sdata)) continue; - width = ieee80211_get_chanctx_vif_max_required_bw(sdata, conf); + width = ieee80211_get_chanctx_vif_max_required_bw(sdata, ctx, + rsvd_for); max_bw = max(max_bw, width); } @@ -336,8 +340,8 @@ ieee80211_get_chanctx_max_required_bw(struct ieee80211_local *local, /* use the configured bandwidth in case of monitor interface */ sdata = rcu_dereference(local->monitor_sdata); if (sdata && - rcu_access_pointer(sdata->vif.bss_conf.chanctx_conf) == conf) - max_bw = max(max_bw, conf->def.width); + rcu_access_pointer(sdata->vif.bss_conf.chanctx_conf) == &ctx->conf) + max_bw = max(max_bw, ctx->conf.def.width); rcu_read_unlock(); @@ -349,8 +353,10 @@ ieee80211_get_chanctx_max_required_bw(struct ieee80211_local *local, * the max of min required widths of all the interfaces bound to this * channel context. */ -static u32 _ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local, - struct ieee80211_chanctx *ctx) +static u32 +_ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local, + struct ieee80211_chanctx *ctx, + struct ieee80211_link_data *rsvd_for) { enum nl80211_chan_width max_bw; struct cfg80211_chan_def min_def; @@ -370,7 +376,7 @@ static u32 _ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local, return 0; } - max_bw = ieee80211_get_chanctx_max_required_bw(local, &ctx->conf); + max_bw = ieee80211_get_chanctx_max_required_bw(local, ctx, rsvd_for); /* downgrade chandef up to max_bw */ min_def = ctx->conf.def; @@ -448,9 +454,10 @@ static void ieee80211_chan_bw_change(struct ieee80211_local *local, * channel context. */ void ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local, - struct ieee80211_chanctx *ctx) + struct ieee80211_chanctx *ctx, + struct ieee80211_link_data *rsvd_for) { - u32 changed = _ieee80211_recalc_chanctx_min_def(local, ctx); + u32 changed = _ieee80211_recalc_chanctx_min_def(local, ctx, rsvd_for); if (!changed) return; @@ -464,10 +471,11 @@ void ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local, ieee80211_chan_bw_change(local, ctx, false); } -static void ieee80211_change_chanctx(struct ieee80211_local *local, - struct ieee80211_chanctx *ctx, - struct ieee80211_chanctx *old_ctx, - const struct cfg80211_chan_def *chandef) +static void _ieee80211_change_chanctx(struct ieee80211_local *local, + struct ieee80211_chanctx *ctx, + struct ieee80211_chanctx *old_ctx, + const struct cfg80211_chan_def *chandef, + struct ieee80211_link_data *rsvd_for) { u32 changed; @@ -492,7 +500,7 @@ static void ieee80211_change_chanctx(struct ieee80211_local *local, ieee80211_chan_bw_change(local, old_ctx, true); if (cfg80211_chandef_identical(&ctx->conf.def, chandef)) { - ieee80211_recalc_chanctx_min_def(local, ctx); + ieee80211_recalc_chanctx_min_def(local, ctx, rsvd_for); return; } @@ -502,7 +510,7 @@ static void ieee80211_change_chanctx(struct ieee80211_local *local, /* check if min chanctx also changed */ changed = IEEE80211_CHANCTX_CHANGE_WIDTH | - _ieee80211_recalc_chanctx_min_def(local, ctx); + _ieee80211_recalc_chanctx_min_def(local, ctx, rsvd_for); drv_change_chanctx(local, ctx, changed); if (!local->use_chanctx) { @@ -514,6 +522,14 @@ static void ieee80211_change_chanctx(struct ieee80211_local *local, ieee80211_chan_bw_change(local, old_ctx, false); } +static void ieee80211_change_chanctx(struct ieee80211_local *local, + struct ieee80211_chanctx *ctx, + struct 
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index a0a7839cb961..b0372e76f373 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -2537,7 +2537,8 @@ int ieee80211_chanctx_refcount(struct ieee80211_local *local,
 void ieee80211_recalc_smps_chanctx(struct ieee80211_local *local,
 				   struct ieee80211_chanctx *chanctx);
 void ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local,
-				      struct ieee80211_chanctx *ctx);
+				      struct ieee80211_chanctx *ctx,
+				      struct ieee80211_link_data *rsvd_for);
 
 bool ieee80211_is_radar_required(struct ieee80211_local *local);
 
 void ieee80211_dfs_cac_timer(unsigned long data);
diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h
index de5d69f21306..db0d0132c58c 100644
--- a/net/mac80211/trace.h
+++ b/net/mac80211/trace.h
@@ -67,7 +67,7 @@
 			__entry->min_freq_offset = (c)->chan ? (c)->chan->freq_offset : 0;	\
 			__entry->min_chan_width = (c)->width;					\
 			__entry->min_center_freq1 = (c)->center_freq1;				\
-			__entry->freq1_offset = (c)->freq1_offset;				\
+			__entry->min_freq1_offset = (c)->freq1_offset;				\
 			__entry->min_center_freq2 = (c)->center_freq2;
 #define MIN_CHANDEF_PR_FMT " min_control:%d.%03d MHz min_width:%d min_center: %d.%03d/%d MHz"
 #define MIN_CHANDEF_PR_ARG __entry->min_control_freq, __entry->min_freq_offset,		\
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 1a3327407552..0d9fbc8458fd 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -3791,6 +3791,7 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
 	ieee80211_tx_result r;
 	struct ieee80211_vif *vif = txq->vif;
 	int q = vif->hw_queue[txq->ac];
+	unsigned long flags;
 	bool q_stopped;
 
 	WARN_ON_ONCE(softirq_count() == 0);
@@ -3799,9 +3800,9 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
 		return NULL;
 
 begin:
-	spin_lock(&local->queue_stop_reason_lock);
+	spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
 	q_stopped = local->queue_stop_reasons[q];
-	spin_unlock(&local->queue_stop_reason_lock);
+	spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
 
 	if (unlikely(q_stopped)) {
 		/* mark for waking later */
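The tx.c hunk converts a plain spin_lock() on queue_stop_reason_lock into spin_lock_irqsave(): the same lock can be taken from contexts that run with interrupts disabled, so the dequeue path must save and restore the IRQ state rather than assume interrupts are on. A kernel-idiom sketch of the pattern, with illustrative names rather than the mac80211 ones:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(reason_lock);
static unsigned long stop_reasons;

/* Safe to call from process, softirq and hardirq context alike. */
static bool queue_stopped(void)
{
	unsigned long flags;
	bool stopped;

	spin_lock_irqsave(&reason_lock, flags);
	stopped = stop_reasons != 0;
	spin_unlock_irqrestore(&reason_lock, flags);
	return stopped;
}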
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 1527d6aafc14..4bf76150925d 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -3015,7 +3015,7 @@ void ieee80211_recalc_min_chandef(struct ieee80211_sub_if_data *sdata,
 
 		chanctx = container_of(chanctx_conf, struct ieee80211_chanctx,
 				       conf);
-		ieee80211_recalc_chanctx_min_def(local, chanctx);
+		ieee80211_recalc_chanctx_min_def(local, chanctx, NULL);
 	}
  unlock:
 	mutex_unlock(&local->chanctx_mtx);
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index f0783e42108b..5f76ae86a656 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -711,9 +711,11 @@ void nf_conntrack_destroy(struct nf_conntrack *nfct)
 
 	rcu_read_lock();
 	ct_hook = rcu_dereference(nf_ct_hook);
-	BUG_ON(ct_hook == NULL);
-	ct_hook->destroy(nfct);
+	if (ct_hook)
+		ct_hook->destroy(nfct);
 	rcu_read_unlock();
+
+	WARN_ON(!ct_hook);
 }
 EXPORT_SYMBOL(nf_conntrack_destroy);
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index d40544cd61a6..69c8c8c7e9b8 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -2976,7 +2976,9 @@ nla_put_failure:
 	return -1;
 }
 
+#if IS_ENABLED(CONFIG_NF_NAT)
 static const union nf_inet_addr any_addr;
+#endif
 
 static __be32 nf_expect_get_id(const struct nf_conntrack_expect *exp)
 {
@@ -3460,10 +3462,12 @@ ctnetlink_change_expect(struct nf_conntrack_expect *x,
 	return 0;
 }
 
+#if IS_ENABLED(CONFIG_NF_NAT)
 static const struct nla_policy exp_nat_nla_policy[CTA_EXPECT_NAT_MAX+1] = {
 	[CTA_EXPECT_NAT_DIR]	= { .type = NLA_U32 },
 	[CTA_EXPECT_NAT_TUPLE]	= { .type = NLA_NESTED },
 };
+#endif
 
 static int
 ctnetlink_parse_expect_nat(const struct nlattr *attr,
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index 57f6724c99a7..169e16fc2bce 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -1218,11 +1218,12 @@ static int __init nf_conntrack_standalone_init(void)
 	nf_conntrack_htable_size_user = nf_conntrack_htable_size;
 #endif
 
+	nf_conntrack_init_end();
+
 	ret = register_pernet_subsys(&nf_conntrack_net_ops);
 	if (ret < 0)
 		goto out_pernet;
 
-	nf_conntrack_init_end();
 	return 0;
 
 out_pernet:
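The nf_conntrack_destroy() hunk above downgrades a fatal BUG_ON(ct_hook == NULL) to a tolerant path: skip the callback when the hook is already gone and report it with WARN_ON() afterwards, trading a guaranteed crash for a loud but survivable backtrace. The defensive shape in isolation (hook type and names are illustrative):

#include <linux/bug.h>
#include <linux/rcupdate.h>

struct ct_hook_ops {
	void (*destroy)(void *obj);
};

static struct ct_hook_ops __rcu *hook_ops;

static void object_destroy(void *obj)
{
	struct ct_hook_ops *ops;

	rcu_read_lock();
	ops = rcu_dereference(hook_ops);
	if (ops)			/* tolerate a missing hook ... */
		ops->destroy(obj);
	rcu_read_unlock();

	WARN_ON(!ops);			/* ... but still complain loudly */
}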
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 59fb8320ab4d..dc5675962de4 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -3865,12 +3865,10 @@ static struct nft_rule *nft_rule_lookup_byid(const struct net *net,
 	struct nft_trans *trans;
 
 	list_for_each_entry(trans, &nft_net->commit_list, list) {
-		struct nft_rule *rule = nft_trans_rule(trans);
-
 		if (trans->msg_type == NFT_MSG_NEWRULE &&
 		    trans->ctx.chain == chain &&
 		    id == nft_trans_rule_id(trans))
-			return rule;
+			return nft_trans_rule(trans);
 	}
 	return ERR_PTR(-ENOENT);
 }
diff --git a/net/netfilter/nft_chain_filter.c b/net/netfilter/nft_chain_filter.c
index c3563f0be269..680fe557686e 100644
--- a/net/netfilter/nft_chain_filter.c
+++ b/net/netfilter/nft_chain_filter.c
@@ -344,6 +344,12 @@ static void nft_netdev_event(unsigned long event, struct net_device *dev,
 		return;
 	}
 
+	/* UNREGISTER events are also happening on netns exit.
+	 *
+	 * Although nf_tables core releases all tables/chains, only this event
+	 * handler provides guarantee that hook->ops.dev is still accessible,
+	 * so we cannot skip exiting net namespaces.
+	 */
 	__nft_release_basechain(ctx);
 }
 
@@ -362,9 +368,6 @@ static int nf_tables_netdev_event(struct notifier_block *this,
 	    event != NETDEV_CHANGENAME)
 		return NOTIFY_DONE;
 
-	if (!check_net(ctx.net))
-		return NOTIFY_DONE;
-
 	nft_net = nft_pernet(ctx.net);
 	mutex_lock(&nft_net->commit_mutex);
 	list_for_each_entry(table, &nft_net->tables, list) {
diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
index 19ea4d3c3553..2f114aa10f1a 100644
--- a/net/netfilter/nft_set_rbtree.c
+++ b/net/netfilter/nft_set_rbtree.c
@@ -221,7 +221,7 @@ static int nft_rbtree_gc_elem(const struct nft_set *__set,
 {
 	struct nft_set *set = (struct nft_set *)__set;
 	struct rb_node *prev = rb_prev(&rbe->node);
-	struct nft_rbtree_elem *rbe_prev;
+	struct nft_rbtree_elem *rbe_prev = NULL;
 	struct nft_set_gc_batch *gcb;
 
 	gcb = nft_set_gc_batch_check(set, NULL, GFP_ATOMIC);
@@ -229,17 +229,21 @@ static int nft_rbtree_gc_elem(const struct nft_set *__set,
 		return -ENOMEM;
 
 	/* search for expired end interval coming before this element. */
-	do {
+	while (prev) {
 		rbe_prev = rb_entry(prev, struct nft_rbtree_elem, node);
 		if (nft_rbtree_interval_end(rbe_prev))
 			break;
 
 		prev = rb_prev(prev);
-	} while (prev != NULL);
+	}
+
+	if (rbe_prev) {
+		rb_erase(&rbe_prev->node, &priv->root);
+		atomic_dec(&set->nelems);
+	}
 
-	rb_erase(&rbe_prev->node, &priv->root);
 	rb_erase(&rbe->node, &priv->root);
-	atomic_sub(2, &set->nelems);
+	atomic_dec(&set->nelems);
 
 	nft_set_gc_batch_add(gcb, rbe);
 	nft_set_gc_batch_complete(gcb);
@@ -268,7 +272,7 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
 			       struct nft_set_ext **ext)
 {
 	struct nft_rbtree_elem *rbe, *rbe_le = NULL, *rbe_ge = NULL;
-	struct rb_node *node, *parent, **p, *first = NULL;
+	struct rb_node *node, *next, *parent, **p, *first = NULL;
 	struct nft_rbtree *priv = nft_set_priv(set);
 	u8 genmask = nft_genmask_next(net);
 	int d, err;
@@ -307,7 +311,9 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
 	 * Values stored in the tree are in reversed order, starting from
 	 * highest to lowest value.
 	 */
-	for (node = first; node != NULL; node = rb_next(node)) {
+	for (node = first; node != NULL; node = next) {
+		next = rb_next(node);
+
 		rbe = rb_entry(node, struct nft_rbtree_elem, node);
 
 		if (!nft_set_elem_active(&rbe->ext, genmask))
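__nft_rbtree_insert() above now fetches rb_next(node) before the loop body runs, because garbage collection may erase the current node mid-walk; it is the rbtree equivalent of list_for_each_entry_safe(). A kernel-idiom sketch of that discipline (the predicate is illustrative):

#include <linux/rbtree.h>

/* Walk a tree whose nodes the body may erase: cache the successor
 * first, so rb_next() is never called on a node already removed.
 */
static void prune_tree(struct rb_root *root, bool (*expired)(struct rb_node *))
{
	struct rb_node *node, *next;

	for (node = rb_first(root); node; node = next) {
		next = rb_next(node);
		if (expired(node))
			rb_erase(node, root);
	}
}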
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 7ef8b9a1e30c..c87804112d0c 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1990,7 +1990,7 @@ static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
 
 	skb_free_datagram(sk, skb);
 
-	if (nlk->cb_running &&
+	if (READ_ONCE(nlk->cb_running) &&
 	    atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
 		ret = netlink_dump(sk);
 		if (ret) {
@@ -2302,7 +2302,7 @@ static int netlink_dump(struct sock *sk)
 	if (cb->done)
 		cb->done(cb);
 
-	nlk->cb_running = false;
+	WRITE_ONCE(nlk->cb_running, false);
 	module = cb->module;
 	skb = cb->skb;
 	mutex_unlock(nlk->cb_mutex);
@@ -2365,7 +2365,7 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
 		goto error_put;
 	}
 
-	nlk->cb_running = true;
+	WRITE_ONCE(nlk->cb_running, true);
 	nlk->dump_done_errno = INT_MAX;
 
 	mutex_unlock(nlk->cb_mutex);
@@ -2703,7 +2703,7 @@ static int netlink_native_seq_show(struct seq_file *seq, void *v)
 			   nlk->groups ? (u32)nlk->groups[0] : 0,
 			   sk_rmem_alloc_get(s),
 			   sk_wmem_alloc_get(s),
-			   nlk->cb_running,
+			   READ_ONCE(nlk->cb_running),
 			   refcount_read(&s->sk_refcnt),
 			   atomic_read(&s->sk_drops),
 			   sock_i_ino(s)
diff --git a/net/nsh/nsh.c b/net/nsh/nsh.c
index e9ca007718b7..0f23e5e8e03e 100644
--- a/net/nsh/nsh.c
+++ b/net/nsh/nsh.c
@@ -77,13 +77,12 @@ static struct sk_buff *nsh_gso_segment(struct sk_buff *skb,
 				       netdev_features_t features)
 {
 	struct sk_buff *segs = ERR_PTR(-EINVAL);
+	u16 mac_offset = skb->mac_header;
 	unsigned int nsh_len, mac_len;
 	__be16 proto;
-	int nhoff;
 
 	skb_reset_network_header(skb);
 
-	nhoff = skb->network_header - skb->mac_header;
 	mac_len = skb->mac_len;
 
 	if (unlikely(!pskb_may_pull(skb, NSH_BASE_HDR_LEN)))
@@ -108,15 +107,14 @@ static struct sk_buff *nsh_gso_segment(struct sk_buff *skb,
 	segs = skb_mac_gso_segment(skb, features);
 	if (IS_ERR_OR_NULL(segs)) {
 		skb_gso_error_unwind(skb, htons(ETH_P_NSH), nsh_len,
-				     skb->network_header - nhoff,
-				     mac_len);
+				     mac_offset, mac_len);
 		goto out;
 	}
 
 	for (skb = segs; skb; skb = skb->next) {
 		skb->protocol = htons(ETH_P_NSH);
 		__skb_push(skb, nsh_len);
-		skb_set_mac_header(skb, -nhoff);
+		skb->mac_header = mac_offset;
 		skb->network_header = skb->mac_header + mac_len;
 		skb->mac_len = mac_len;
 	}
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 640d94e34635..94c6a1ffa459 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1934,10 +1934,8 @@ static void packet_parse_headers(struct sk_buff *skb, struct socket *sock)
 	/* Move network header to the right position for VLAN tagged packets */
 	if (likely(skb->dev->type == ARPHRD_ETHER) &&
 	    eth_type_vlan(skb->protocol) &&
-	    __vlan_get_protocol(skb, skb->protocol, &depth) != 0) {
-		if (pskb_may_pull(skb, depth))
-			skb_set_network_header(skb, depth);
-	}
+	    vlan_get_protocol_and_depth(skb, skb->protocol, &depth) != 0)
+		skb_set_network_header(skb, depth);
 
 	skb_probe_transport_header(skb);
 }
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 2f66a2006517..2abe45af98e7 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -324,9 +324,12 @@ bool sctp_transport_pl_recv(struct sctp_transport *t)
 		t->pl.probe_size += SCTP_PL_BIG_STEP;
 	} else if (t->pl.state == SCTP_PL_SEARCH) {
 		if (!t->pl.probe_high) {
-			t->pl.probe_size = min(t->pl.probe_size + SCTP_PL_BIG_STEP,
-					       SCTP_MAX_PLPMTU);
-			return false;
+			if (t->pl.probe_size < SCTP_MAX_PLPMTU) {
+				t->pl.probe_size = min(t->pl.probe_size + SCTP_PL_BIG_STEP,
+						       SCTP_MAX_PLPMTU);
+				return false;
+			}
+			t->pl.probe_high = SCTP_MAX_PLPMTU;
 		}
 		t->pl.probe_size += SCTP_PL_MIN_STEP;
 		if (t->pl.probe_size >= t->pl.probe_high) {
@@ -341,7 +344,7 @@ bool sctp_transport_pl_recv(struct sctp_transport *t)
 	} else if (t->pl.state == SCTP_PL_COMPLETE) {
 		/* Raise probe_size again after 30 * interval in Search Complete */
 		t->pl.state = SCTP_PL_SEARCH; /* Search Complete -> Search */
-		t->pl.probe_size += SCTP_PL_MIN_STEP;
+		t->pl.probe_size = min(t->pl.probe_size + SCTP_PL_MIN_STEP, SCTP_MAX_PLPMTU);
 	}
 
 	return t->pl.state == SCTP_PL_COMPLETE;
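The af_netlink.c hunks above wrap cb_running in READ_ONCE()/WRITE_ONCE(): recvmsg and the proc output peek at the flag without taking cb_mutex, so the accesses are annotated to keep the compiler from tearing, fusing or refetching them, and to document the intentional data race. The pattern in miniature (struct and names are illustrative):

#include <linux/compiler.h>
#include <linux/mutex.h>

struct dump_state {
	struct mutex lock;
	bool running;	/* written under lock, also read locklessly */
};

static void dump_begin(struct dump_state *ds)
{
	mutex_lock(&ds->lock);
	WRITE_ONCE(ds->running, true);
	mutex_unlock(&ds->lock);
}

static bool dump_in_progress(const struct dump_state *ds)
{
	return READ_ONCE(ds->running);	/* annotated lockless read */
}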
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 50c38b624f77..538e9c6ec8c9 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -2000,8 +2000,10 @@ static int smc_listen_rdma_init(struct smc_sock *new_smc,
 		return rc;
 
 	/* create send buffer and rmb */
-	if (smc_buf_create(new_smc, false))
+	if (smc_buf_create(new_smc, false)) {
+		smc_conn_abort(new_smc, ini->first_contact_local);
 		return SMC_CLC_DECL_MEM;
+	}
 
 	return 0;
 }
@@ -2217,8 +2219,11 @@ static void smc_find_rdma_v2_device_serv(struct smc_sock *new_smc,
 	smcr_version = ini->smcr_version;
 	ini->smcr_version = SMC_V2;
 	rc = smc_listen_rdma_init(new_smc, ini);
-	if (!rc)
+	if (!rc) {
 		rc = smc_listen_rdma_reg(new_smc, ini->first_contact_local);
+		if (rc)
+			smc_conn_abort(new_smc, ini->first_contact_local);
+	}
 	if (!rc)
 		return;
 	ini->smcr_version = smcr_version;
diff --git a/net/smc/smc_close.c b/net/smc/smc_close.c
index 31db7438857c..dbdf03e8aa5b 100644
--- a/net/smc/smc_close.c
+++ b/net/smc/smc_close.c
@@ -67,8 +67,8 @@ static void smc_close_stream_wait(struct smc_sock *smc, long timeout)
 
 		rc = sk_wait_event(sk, &timeout,
 				   !smc_tx_prepared_sends(&smc->conn) ||
-				   sk->sk_err == ECONNABORTED ||
-				   sk->sk_err == ECONNRESET ||
+				   READ_ONCE(sk->sk_err) == ECONNABORTED ||
+				   READ_ONCE(sk->sk_err) == ECONNRESET ||
 				   smc->conn.killed,
 				   &wait);
 		if (rc)
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index 454356771cda..3f465faf2b68 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -127,6 +127,7 @@ static int smcr_lgr_conn_assign_link(struct smc_connection *conn, bool first)
 	int i, j;
 
 	/* do link balancing */
+	conn->lnk = NULL;	/* reset conn->lnk first */
 	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
 		struct smc_link *lnk = &conn->lgr->lnk[i];
diff --git a/net/smc/smc_rx.c b/net/smc/smc_rx.c
index 4380d32f5a5f..9a2f3638d161 100644
--- a/net/smc/smc_rx.c
+++ b/net/smc/smc_rx.c
@@ -267,9 +267,9 @@ int smc_rx_wait(struct smc_sock *smc, long *timeo,
 	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
 	add_wait_queue(sk_sleep(sk), &wait);
 	rc = sk_wait_event(sk, timeo,
-			   sk->sk_err ||
+			   READ_ONCE(sk->sk_err) ||
 			   cflags->peer_conn_abort ||
-			   sk->sk_shutdown & RCV_SHUTDOWN ||
+			   READ_ONCE(sk->sk_shutdown) & RCV_SHUTDOWN ||
 			   conn->killed ||
 			   fcrit(conn),
 			   &wait);
diff --git a/net/smc/smc_tx.c b/net/smc/smc_tx.c
index f4b6a71ac488..45128443f1f1 100644
--- a/net/smc/smc_tx.c
+++ b/net/smc/smc_tx.c
@@ -113,8 +113,8 @@ static int smc_tx_wait(struct smc_sock *smc, int flags)
 			break; /* at least 1 byte of free & no urgent data */
 		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
 		sk_wait_event(sk, &timeo,
-			      sk->sk_err ||
-			      (sk->sk_shutdown & SEND_SHUTDOWN) ||
+			      READ_ONCE(sk->sk_err) ||
+			      (READ_ONCE(sk->sk_shutdown) & SEND_SHUTDOWN) ||
 			      smc_cdc_rxed_any_close(conn) ||
 			      (atomic_read(&conn->sndbuf_space) &&
 			       !conn->urg_tx_pend),
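Several smc hunks above apply the same annotation inside sk_wait_event(): the condition is re-evaluated while the socket lock is dropped, so racy fields like sk_err and sk_shutdown need READ_ONCE(). A reduced sketch of such a wait loop (the condition itself is illustrative):

#include <net/sock.h>

static int wait_for_err_or_rcv_shutdown(struct sock *sk, long *timeo)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	int rc;

	add_wait_queue(sk_sleep(sk), &wait);
	/* condition re-checked with the socket unlocked, hence READ_ONCE */
	rc = sk_wait_event(sk, timeo,
			   READ_ONCE(sk->sk_err) ||
			   (READ_ONCE(sk->sk_shutdown) & RCV_SHUTDOWN),
			   &wait);
	remove_wait_queue(sk_sleep(sk), &wait);
	return rc;
}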
a7b4b37d86df..b7e01d0fe082 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -2911,7 +2911,7 @@ static int do_recvmmsg(int fd, struct mmsghdr __user *mmsg,
 		 * error to return on the next call or if the
 		 * app asks about it using getsockopt(SO_ERROR).
 		 */
-		sock->sk->sk_err = -err;
+		WRITE_ONCE(sock->sk->sk_err, -err);
 	}
 out_put:
 	fput_light(sock->file, fput_needed);
diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
index 212c5d57465a..9734e1d9f991 100644
--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
+++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
@@ -639,6 +639,16 @@ gss_krb5_cts_crypt(struct crypto_sync_skcipher *cipher, struct xdr_buf *buf,
 
 	ret = write_bytes_to_xdr_buf(buf, offset, data, len);
 
+#if IS_ENABLED(CONFIG_KUNIT)
+	/*
+	 * CBC-CTS does not define an output IV but RFC 3962 defines it as the
+	 * penultimate block of ciphertext, so copy that into the IV buffer
+	 * before returning.
+	 */
+	if (encrypt)
+		memcpy(iv, data, crypto_sync_skcipher_ivsize(cipher));
+#endif
+
 out:
 	kfree(data);
 	return ret;
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index c8321de341ee..6debf4fd42d4 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -927,11 +927,10 @@ static void __rpc_execute(struct rpc_task *task)
 		 */
 		do_action = task->tk_action;
 		/* Tasks with an RPC error status should exit */
-		if (do_action != rpc_exit_task &&
+		if (do_action && do_action != rpc_exit_task &&
 		    (status = READ_ONCE(task->tk_rpc_status)) != 0) {
 			task->tk_status = status;
-			if (do_action != NULL)
-				do_action = rpc_exit_task;
+			do_action = rpc_exit_task;
 		}
 		/* Callbacks override all actions */
 		if (task->tk_callback) {
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 26367cf4c17a..79967b6925bd 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -1052,7 +1052,7 @@ static int __svc_register(struct net *net, const char *progname,
 #endif
 	}
 
-	trace_svc_register(progname, version, protocol, port, family, error);
+	trace_svc_register(progname, version, family, protocol, port, error);
 	return error;
 }
 
@@ -1416,7 +1416,7 @@ err_bad_rpc:
 	/* Only RPCv2 supported */
 	xdr_stream_encode_u32(xdr, RPC_VERSION);
 	xdr_stream_encode_u32(xdr, RPC_VERSION);
-	goto sendit;
+	return 1;	/* don't wrap */
 
 err_bad_auth:
 	dprintk("svc: authentication failed (%d)\n",
@@ -1432,7 +1432,7 @@ err_bad_auth:
 err_bad_prog:
 	dprintk("svc: unknown program %d\n", rqstp->rq_prog);
 	serv->sv_stats->rpcbadfmt++;
-	xdr_stream_encode_u32(xdr, RPC_PROG_UNAVAIL);
+	*rqstp->rq_accept_statp = rpc_prog_unavail;
 	goto sendit;
 
 err_bad_vers:
@@ -1440,7 +1440,12 @@ err_bad_vers:
 		       rqstp->rq_vers, rqstp->rq_prog, progp->pg_name);
 
 	serv->sv_stats->rpcbadfmt++;
-	xdr_stream_encode_u32(xdr, RPC_PROG_MISMATCH);
+	*rqstp->rq_accept_statp = rpc_prog_mismatch;
+
+	/*
+	 * svc_authenticate() has already added the verifier and
+	 * advanced the stream just past rq_accept_statp.
+	 */
 	xdr_stream_encode_u32(xdr, process.mismatch.lovers);
 	xdr_stream_encode_u32(xdr, process.mismatch.hivers);
 	goto sendit;
@@ -1449,19 +1454,19 @@ err_bad_proc:
 	svc_printk(rqstp, "unknown procedure (%d)\n", rqstp->rq_proc);
 
 	serv->sv_stats->rpcbadfmt++;
-	xdr_stream_encode_u32(xdr, RPC_PROC_UNAVAIL);
+	*rqstp->rq_accept_statp = rpc_proc_unavail;
 	goto sendit;
 
 err_garbage_args:
 	svc_printk(rqstp, "failed to decode RPC header\n");
 
 	serv->sv_stats->rpcbadfmt++;
-	xdr_stream_encode_u32(xdr, RPC_GARBAGE_ARGS);
+	*rqstp->rq_accept_statp = rpc_garbage_args;
 	goto sendit;
 
 err_system_err:
 	serv->sv_stats->rpcbadfmt++;
-	xdr_stream_encode_u32(xdr, RPC_SYSTEM_ERR);
+	*rqstp->rq_accept_statp = rpc_system_err;
 	goto sendit;
 }
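The gss_krb5_cts_crypt() hunk above hides its extra memcpy() behind IS_ENABLED(CONFIG_KUNIT): the RFC 3962 output IV is only needed so KUnit tests can verify it, and the copy compiles away entirely in production builds. The guard pattern in isolation (function and names are illustrative):

#include <linux/kconfig.h>
#include <linux/string.h>
#include <linux/types.h>

static void cipher_finish(u8 *iv, const u8 *last_blocks, size_t ivlen)
{
	/* ... normal encryption work elided ... */

#if IS_ENABLED(CONFIG_KUNIT)
	/* expose the output IV for tests only; zero cost otherwise */
	memcpy(iv, last_blocks, ivlen);
#endif
}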
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 84e5d7d31481..13a14897bc17 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -532,13 +532,23 @@ void svc_reserve(struct svc_rqst *rqstp, int space)
 }
 EXPORT_SYMBOL_GPL(svc_reserve);
 
+static void free_deferred(struct svc_xprt *xprt, struct svc_deferred_req *dr)
+{
+	if (!dr)
+		return;
+
+	xprt->xpt_ops->xpo_release_ctxt(xprt, dr->xprt_ctxt);
+	kfree(dr);
+}
+
 static void svc_xprt_release(struct svc_rqst *rqstp)
 {
 	struct svc_xprt	*xprt = rqstp->rq_xprt;
 
-	xprt->xpt_ops->xpo_release_rqst(rqstp);
+	xprt->xpt_ops->xpo_release_ctxt(xprt, rqstp->rq_xprt_ctxt);
+	rqstp->rq_xprt_ctxt = NULL;
 
-	kfree(rqstp->rq_deferred);
+	free_deferred(xprt, rqstp->rq_deferred);
 	rqstp->rq_deferred = NULL;
 
 	svc_rqst_release_pages(rqstp);
@@ -1054,7 +1064,7 @@ static void svc_delete_xprt(struct svc_xprt *xprt)
 	spin_unlock_bh(&serv->sv_lock);
 
 	while ((dr = svc_deferred_dequeue(xprt)) != NULL)
-		kfree(dr);
+		free_deferred(xprt, dr);
 
 	call_xpt_users(xprt);
 	svc_xprt_put(xprt);
@@ -1176,8 +1186,8 @@ static void svc_revisit(struct cache_deferred_req *dreq, int too_many)
 	if (too_many || test_bit(XPT_DEAD, &xprt->xpt_flags)) {
 		spin_unlock(&xprt->xpt_lock);
 		trace_svc_defer_drop(dr);
+		free_deferred(xprt, dr);
 		svc_xprt_put(xprt);
-		kfree(dr);
 		return;
 	}
 	dr->xprt = NULL;
@@ -1222,14 +1232,14 @@ static struct cache_deferred_req *svc_defer(struct cache_req *req)
 		dr->addrlen = rqstp->rq_addrlen;
 		dr->daddr = rqstp->rq_daddr;
 		dr->argslen = rqstp->rq_arg.len >> 2;
-		dr->xprt_ctxt = rqstp->rq_xprt_ctxt;
-		rqstp->rq_xprt_ctxt = NULL;
 
 		/* back up head to the start of the buffer and copy */
 		skip = rqstp->rq_arg.len - rqstp->rq_arg.head[0].iov_len;
 		memcpy(dr->args, rqstp->rq_arg.head[0].iov_base - skip,
 		       dr->argslen << 2);
 	}
+	dr->xprt_ctxt = rqstp->rq_xprt_ctxt;
+	rqstp->rq_xprt_ctxt = NULL;
 	trace_svc_defer(rqstp);
 	svc_xprt_get(rqstp->rq_xprt);
 	dr->xprt = rqstp->rq_xprt;
@@ -1262,6 +1272,8 @@ static noinline int svc_deferred_recv(struct svc_rqst *rqstp)
 	rqstp->rq_daddr       = dr->daddr;
 	rqstp->rq_respages    = rqstp->rq_pages;
 	rqstp->rq_xprt_ctxt   = dr->xprt_ctxt;
+
+	dr->xprt_ctxt = NULL;
 	svc_xprt_received(rqstp->rq_xprt);
 	return dr->argslen << 2;
 }
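The svc_xprt.c hunks above route every deferred-request teardown through one free_deferred() helper so the transport context travels with the request; the old bare kfree(dr) paths leaked dr->xprt_ctxt. The single-exit cleanup shape, reduced to its essentials (types are illustrative):

#include <linux/slab.h>

struct xprt;

struct deferred_req {
	void *xprt_ctxt;	/* transport-owned context */
};

static void xprt_release_ctxt(struct xprt *xprt, void *ctxt);

/* Every drop path calls this, so the context can never be
 * leaked by a caller that only remembers to free the request.
 */
static void free_deferred(struct xprt *xprt, struct deferred_req *dr)
{
	if (!dr)
		return;

	xprt_release_ctxt(xprt, dr->xprt_ctxt);
	kfree(dr);
}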
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index a51c9b989d58..63fe7a338992 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -121,27 +121,27 @@ static void svc_reclassify_socket(struct socket *sock)
 #endif
 
 /**
- * svc_tcp_release_rqst - Release transport-related resources
- * @rqstp: request structure with resources to be released
+ * svc_tcp_release_ctxt - Release transport-related resources
+ * @xprt: the transport which owned the context
+ * @ctxt: the context from rqstp->rq_xprt_ctxt or dr->xprt_ctxt
 *
 */
-static void svc_tcp_release_rqst(struct svc_rqst *rqstp)
+static void svc_tcp_release_ctxt(struct svc_xprt *xprt, void *ctxt)
 {
 }
 
 /**
- * svc_udp_release_rqst - Release transport-related resources
- * @rqstp: request structure with resources to be released
+ * svc_udp_release_ctxt - Release transport-related resources
+ * @xprt: the transport which owned the context
+ * @ctxt: the context from rqstp->rq_xprt_ctxt or dr->xprt_ctxt
 *
 */
-static void svc_udp_release_rqst(struct svc_rqst *rqstp)
+static void svc_udp_release_ctxt(struct svc_xprt *xprt, void *ctxt)
 {
-	struct sk_buff *skb = rqstp->rq_xprt_ctxt;
+	struct sk_buff *skb = ctxt;
 
-	if (skb) {
-		rqstp->rq_xprt_ctxt = NULL;
+	if (skb)
 		consume_skb(skb);
-	}
 }
 
 union svc_pktinfo_u {
@@ -696,7 +696,8 @@ static int svc_udp_sendto(struct svc_rqst *rqstp)
 	unsigned int sent;
 	int err;
 
-	svc_udp_release_rqst(rqstp);
+	svc_udp_release_ctxt(xprt, rqstp->rq_xprt_ctxt);
+	rqstp->rq_xprt_ctxt = NULL;
 
 	svc_set_cmsg_data(rqstp, cmh);
 
@@ -768,7 +769,7 @@ static const struct svc_xprt_ops svc_udp_ops = {
 	.xpo_recvfrom = svc_udp_recvfrom,
 	.xpo_sendto = svc_udp_sendto,
 	.xpo_result_payload = svc_sock_result_payload,
-	.xpo_release_rqst = svc_udp_release_rqst,
+	.xpo_release_ctxt = svc_udp_release_ctxt,
 	.xpo_detach = svc_sock_detach,
 	.xpo_free = svc_sock_free,
 	.xpo_has_wspace = svc_udp_has_wspace,
@@ -895,6 +896,9 @@ static struct svc_xprt *svc_tcp_accept(struct svc_xprt *xprt)
 		trace_svcsock_accept_err(xprt, serv->sv_name, err);
 		return NULL;
 	}
+	if (IS_ERR(sock_alloc_file(newsock, O_NONBLOCK, NULL)))
+		return NULL;
+
 	set_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags);
 
 	err = kernel_getpeername(newsock, sin);
@@ -935,7 +939,7 @@ static struct svc_xprt *svc_tcp_accept(struct svc_xprt *xprt)
 	return &newsvsk->sk_xprt;
 
 failed:
-	sock_release(newsock);
+	sockfd_put(newsock);
 	return NULL;
 }
@@ -1298,7 +1302,8 @@ static int svc_tcp_sendto(struct svc_rqst *rqstp)
 	unsigned int sent;
 	int err;
 
-	svc_tcp_release_rqst(rqstp);
+	svc_tcp_release_ctxt(xprt, rqstp->rq_xprt_ctxt);
+	rqstp->rq_xprt_ctxt = NULL;
 
 	atomic_inc(&svsk->sk_sendqlen);
 	mutex_lock(&xprt->xpt_mutex);
@@ -1343,7 +1348,7 @@ static const struct svc_xprt_ops svc_tcp_ops = {
 	.xpo_recvfrom = svc_tcp_recvfrom,
 	.xpo_sendto = svc_tcp_sendto,
 	.xpo_result_payload = svc_sock_result_payload,
-	.xpo_release_rqst = svc_tcp_release_rqst,
+	.xpo_release_ctxt = svc_tcp_release_ctxt,
 	.xpo_detach = svc_tcp_sock_detach,
 	.xpo_free = svc_sock_free,
 	.xpo_has_wspace = svc_tcp_has_wspace,
@@ -1430,7 +1435,6 @@ static struct svc_sock *svc_setup_socket(struct svc_serv *serv,
 						struct socket *sock,
 						int flags)
 {
-	struct file *filp = NULL;
 	struct svc_sock	*svsk;
 	struct sock	*inet;
 	int		pmap_register = !(flags & SVC_SOCK_ANONYMOUS);
@@ -1439,14 +1443,6 @@ static struct svc_sock *svc_setup_socket(struct svc_serv *serv,
 	if (!svsk)
 		return ERR_PTR(-ENOMEM);
 
-	if (!sock->file) {
-		filp = sock_alloc_file(sock, O_NONBLOCK, NULL);
-		if (IS_ERR(filp)) {
-			kfree(svsk);
-			return ERR_CAST(filp);
-		}
-	}
-
 	inet = sock->sk;
 
 	if (pmap_register) {
@@ -1456,8 +1452,6 @@ static struct svc_sock *svc_setup_socket(struct svc_serv *serv,
 				     inet->sk_protocol,
 				     ntohs(inet_sk(inet)->inet_sport));
 		if (err < 0) {
-			if (filp)
-				fput(filp);
 			kfree(svsk);
 			return ERR_PTR(err);
 		}
resources - * @rqstp: svc_rqst being released + * svc_rdma_release_ctxt - Release transport-specific per-rqst resources + * @xprt: the transport which owned the context + * @vctxt: the context from rqstp->rq_xprt_ctxt or dr->xprt_ctxt * * Ensure that the recv_ctxt is released whether or not a Reply * was sent. For example, the client could close the connection, * or svc_process could drop an RPC, before the Reply is sent. */ -void svc_rdma_release_rqst(struct svc_rqst *rqstp) +void svc_rdma_release_ctxt(struct svc_xprt *xprt, void *vctxt) { - struct svc_rdma_recv_ctxt *ctxt = rqstp->rq_xprt_ctxt; - struct svc_xprt *xprt = rqstp->rq_xprt; + struct svc_rdma_recv_ctxt *ctxt = vctxt; struct svcxprt_rdma *rdma = container_of(xprt, struct svcxprt_rdma, sc_xprt); - rqstp->rq_xprt_ctxt = NULL; if (ctxt) svc_rdma_recv_ctxt_put(rdma, ctxt); } diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c index 416b298f74dd..ca04f7a6a085 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c @@ -80,7 +80,7 @@ static const struct svc_xprt_ops svc_rdma_ops = { .xpo_recvfrom = svc_rdma_recvfrom, .xpo_sendto = svc_rdma_sendto, .xpo_result_payload = svc_rdma_result_payload, - .xpo_release_rqst = svc_rdma_release_rqst, + .xpo_release_ctxt = svc_rdma_release_ctxt, .xpo_detach = svc_rdma_detach, .xpo_free = svc_rdma_free, .xpo_has_wspace = svc_rdma_has_wspace, diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c index 35cac7733fd3..53881406e200 100644 --- a/net/tipc/bearer.c +++ b/net/tipc/bearer.c @@ -541,6 +541,19 @@ int tipc_bearer_mtu(struct net *net, u32 bearer_id) return mtu; } +int tipc_bearer_min_mtu(struct net *net, u32 bearer_id) +{ + int mtu = TIPC_MIN_BEARER_MTU; + struct tipc_bearer *b; + + rcu_read_lock(); + b = bearer_get(net, bearer_id); + if (b) + mtu += b->encap_hlen; + rcu_read_unlock(); + return mtu; +} + /* tipc_bearer_xmit_skb - sends buffer to destination over bearer */ void tipc_bearer_xmit_skb(struct net *net, u32 bearer_id, @@ -1138,8 +1151,8 @@ int __tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info) return -EINVAL; } #ifdef CONFIG_TIPC_MEDIA_UDP - if (tipc_udp_mtu_bad(nla_get_u32 - (props[TIPC_NLA_PROP_MTU]))) { + if (nla_get_u32(props[TIPC_NLA_PROP_MTU]) < + b->encap_hlen + TIPC_MIN_BEARER_MTU) { NL_SET_ERR_MSG(info->extack, "MTU value is out-of-range"); return -EINVAL; diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h index 490ad6e5f7a3..bd0cc5c287ef 100644 --- a/net/tipc/bearer.h +++ b/net/tipc/bearer.h @@ -146,6 +146,7 @@ struct tipc_media { * @identity: array index of this bearer within TIPC bearer array * @disc: ptr to link setup request * @net_plane: network plane ('A' through 'H') currently associated with bearer + * @encap_hlen: encap headers length * @up: bearer up flag (bit 0) * @refcnt: tipc_bearer reference counter * @@ -170,6 +171,7 @@ struct tipc_bearer { u32 identity; struct tipc_discoverer *disc; char net_plane; + u16 encap_hlen; unsigned long up; refcount_t refcnt; }; @@ -232,6 +234,7 @@ int tipc_bearer_setup(void); void tipc_bearer_cleanup(void); void tipc_bearer_stop(struct net *net); int tipc_bearer_mtu(struct net *net, u32 bearer_id); +int tipc_bearer_min_mtu(struct net *net, u32 bearer_id); bool tipc_bearer_bcast_support(struct net *net, u32 bearer_id); void tipc_bearer_xmit_skb(struct net *net, u32 bearer_id, struct sk_buff *skb, diff --git a/net/tipc/link.c b/net/tipc/link.c index b3ce24823f50..2eff1c7949cb 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c @@ 
-2200,7 +2200,7 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
 	struct tipc_msg *hdr = buf_msg(skb);
 	struct tipc_gap_ack_blks *ga = NULL;
 	bool reply = msg_probe(hdr), retransmitted = false;
-	u32 dlen = msg_data_sz(hdr), glen = 0;
+	u32 dlen = msg_data_sz(hdr), glen = 0, msg_max;
 	u16 peers_snd_nxt = msg_next_sent(hdr);
 	u16 peers_tol = msg_link_tolerance(hdr);
 	u16 peers_prio = msg_linkprio(hdr);
@@ -2239,6 +2239,9 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
 	switch (mtyp) {
 	case RESET_MSG:
 	case ACTIVATE_MSG:
+		msg_max = msg_max_pkt(hdr);
+		if (msg_max < tipc_bearer_min_mtu(l->net, l->bearer_id))
+			break;
 		/* Complete own link name with peer's interface name */
 		if_name = strrchr(l->name, ':') + 1;
 		if (sizeof(l->name) - (if_name - l->name) <= TIPC_MAX_IF_NAME)
@@ -2283,8 +2286,8 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
 		l->peer_session = msg_session(hdr);
 		l->in_session = true;
 		l->peer_bearer_id = msg_bearer_id(hdr);
-		if (l->mtu > msg_max_pkt(hdr))
-			l->mtu = msg_max_pkt(hdr);
+		if (l->mtu > msg_max)
+			l->mtu = msg_max;
 		break;
 
 	case STATE_MSG:
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 37edfe10f8c6..dd73d71c02a9 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -314,9 +314,9 @@ static void tsk_rej_rx_queue(struct sock *sk, int error)
 		tipc_sk_respond(sk, skb, error);
 }
 
-static bool tipc_sk_connected(struct sock *sk)
+static bool tipc_sk_connected(const struct sock *sk)
 {
-	return sk->sk_state == TIPC_ESTABLISHED;
+	return READ_ONCE(sk->sk_state) == TIPC_ESTABLISHED;
 }
 
 /* tipc_sk_type_connectionless - check if the socket is datagram socket
diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
index c2bb818704c8..0a85244fd618 100644
--- a/net/tipc/udp_media.c
+++ b/net/tipc/udp_media.c
@@ -738,8 +738,8 @@ static int tipc_udp_enable(struct net *net, struct tipc_bearer *b,
 		udp_conf.local_ip.s_addr = local.ipv4.s_addr;
 		udp_conf.use_udp_checksums = false;
 		ub->ifindex = dev->ifindex;
-		if (tipc_mtu_bad(dev, sizeof(struct iphdr) +
-				      sizeof(struct udphdr))) {
+		b->encap_hlen = sizeof(struct iphdr) + sizeof(struct udphdr);
+		if (tipc_mtu_bad(dev, b->encap_hlen)) {
 			err = -EINVAL;
 			goto err;
 		}
@@ -760,6 +760,7 @@ static int tipc_udp_enable(struct net *net, struct tipc_bearer *b,
 		else
 			udp_conf.local_ip6 = local.ipv6;
 		ub->ifindex = dev->ifindex;
+		b->encap_hlen = sizeof(struct ipv6hdr) + sizeof(struct udphdr);
 		b->mtu = 1280;
 #endif
 	} else {
diff --git a/net/tls/tls.h b/net/tls/tls.h
index 804c3880d028..0672acab2773 100644
--- a/net/tls/tls.h
+++ b/net/tls/tls.h
@@ -167,6 +167,11 @@ static inline bool tls_strp_msg_ready(struct tls_sw_context_rx *ctx)
 	return ctx->strp.msg_ready;
 }
 
+static inline bool tls_strp_msg_mixed_decrypted(struct tls_sw_context_rx *ctx)
+{
+	return ctx->strp.mixed_decrypted;
+}
+
 #ifdef CONFIG_TLS_DEVICE
 int tls_device_init(void);
 void tls_device_cleanup(void);
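The tipc_link_proto_rcv() hunk above validates the peer-advertised maximum packet size before adopting it, so a bogus RESET/ACTIVATE message can no longer shrink the link MTU below what the bearer can actually carry. The guard in isolation (a hedged sketch, not the kernel function):

/* Adopt a peer-advertised MTU only when the bearer can still carry it. */
static void adopt_peer_mtu(unsigned int *link_mtu, unsigned int peer_max,
			   unsigned int bearer_min)
{
	if (peer_max < bearer_min)
		return;			/* reject nonsense advertisements */
	if (*link_mtu > peer_max)
		*link_mtu = peer_max;
}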
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index a7cc4f9faac2..bf69c9d6d06c 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -1007,20 +1007,14 @@ int tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx)
 	struct tls_sw_context_rx *sw_ctx = tls_sw_ctx_rx(tls_ctx);
 	struct sk_buff *skb = tls_strp_msg(sw_ctx);
 	struct strp_msg *rxm = strp_msg(skb);
-	int is_decrypted = skb->decrypted;
-	int is_encrypted = !is_decrypted;
-	struct sk_buff *skb_iter;
-	int left;
-
-	left = rxm->full_len - skb->len;
-	/* Check if all the data is decrypted already */
-	skb_iter = skb_shinfo(skb)->frag_list;
-	while (skb_iter && left > 0) {
-		is_decrypted &= skb_iter->decrypted;
-		is_encrypted &= !skb_iter->decrypted;
-
-		left -= skb_iter->len;
-		skb_iter = skb_iter->next;
+	int is_decrypted, is_encrypted;
+
+	if (!tls_strp_msg_mixed_decrypted(sw_ctx)) {
+		is_decrypted = skb->decrypted;
+		is_encrypted = !is_decrypted;
+	} else {
+		is_decrypted = 0;
+		is_encrypted = 0;
 	}
 
 	trace_tls_device_decrypted(sk, tcp_sk(sk)->copied_seq - rxm->full_len,
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index b32c112984dd..f2e7302a4d96 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -111,7 +111,8 @@ int wait_on_pending_writer(struct sock *sk, long *timeo)
 			break;
 		}
 
-		if (sk_wait_event(sk, timeo, !sk->sk_write_pending, &wait))
+		if (sk_wait_event(sk, timeo,
+				  !READ_ONCE(sk->sk_write_pending), &wait))
 			break;
 	}
 	remove_wait_queue(sk_sleep(sk), &wait);
diff --git a/net/tls/tls_strp.c b/net/tls/tls_strp.c
index 955ac3e0bf4d..da95abbb7ea3 100644
--- a/net/tls/tls_strp.c
+++ b/net/tls/tls_strp.c
@@ -29,34 +29,50 @@ static void tls_strp_anchor_free(struct tls_strparser *strp)
 	struct skb_shared_info *shinfo = skb_shinfo(strp->anchor);
 
 	DEBUG_NET_WARN_ON_ONCE(atomic_read(&shinfo->dataref) != 1);
-	shinfo->frag_list = NULL;
+	if (!strp->copy_mode)
+		shinfo->frag_list = NULL;
 	consume_skb(strp->anchor);
 	strp->anchor = NULL;
 }
 
-/* Create a new skb with the contents of input copied to its page frags */
-static struct sk_buff *tls_strp_msg_make_copy(struct tls_strparser *strp)
+static struct sk_buff *
+tls_strp_skb_copy(struct tls_strparser *strp, struct sk_buff *in_skb,
+		  int offset, int len)
 {
-	struct strp_msg *rxm;
 	struct sk_buff *skb;
-	int i, err, offset;
+	int i, err;
 
-	skb = alloc_skb_with_frags(0, strp->stm.full_len, TLS_PAGE_ORDER,
+	skb = alloc_skb_with_frags(0, len, TLS_PAGE_ORDER,
 				   &err, strp->sk->sk_allocation);
 	if (!skb)
 		return NULL;
 
-	offset = strp->stm.offset;
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
-		WARN_ON_ONCE(skb_copy_bits(strp->anchor, offset,
+		WARN_ON_ONCE(skb_copy_bits(in_skb, offset,
 					   skb_frag_address(frag),
 					   skb_frag_size(frag)));
 		offset += skb_frag_size(frag);
 	}
 
-	skb_copy_header(skb, strp->anchor);
+	skb->len = len;
+	skb->data_len = len;
+	skb_copy_header(skb, in_skb);
+	return skb;
+}
+
+/* Create a new skb with the contents of input copied to its page frags */
+static struct sk_buff *tls_strp_msg_make_copy(struct tls_strparser *strp)
+{
+	struct strp_msg *rxm;
+	struct sk_buff *skb;
+
+	skb = tls_strp_skb_copy(strp, strp->anchor, strp->stm.offset,
+				strp->stm.full_len);
+	if (!skb)
+		return NULL;
+
 	rxm = strp_msg(skb);
 	rxm->offset = 0;
 	return skb;
@@ -180,22 +196,22 @@ static void tls_strp_flush_anchor_copy(struct tls_strparser *strp)
 	for (i = 0; i < shinfo->nr_frags; i++)
 		__skb_frag_unref(&shinfo->frags[i], false);
 	shinfo->nr_frags = 0;
+	if (strp->copy_mode) {
+		kfree_skb_list(shinfo->frag_list);
+		shinfo->frag_list = NULL;
+	}
 	strp->copy_mode = 0;
+	strp->mixed_decrypted = 0;
 }
 
-static int tls_strp_copyin(read_descriptor_t *desc, struct sk_buff *in_skb,
-			   unsigned int offset, size_t in_len)
+static int tls_strp_copyin_frag(struct tls_strparser *strp, struct sk_buff *skb,
+				struct sk_buff *in_skb, unsigned int offset,
+				size_t in_len)
 {
-	struct tls_strparser *strp = (struct tls_strparser *)desc->arg.data;
-	struct sk_buff *skb;
-	skb_frag_t *frag;
 	size_t len, chunk;
+	skb_frag_t *frag;
 	int sz;
 
-	if (strp->msg_ready)
-		return 0;
-
-	skb = strp->anchor;
 	frag = &skb_shinfo(skb)->frags[skb->len / PAGE_SIZE];
 
 	len = in_len;
@@ -208,19 +224,26 @@ static int tls_strp_copyin(read_descriptor_t *desc, struct sk_buff *in_skb,
 					   skb_frag_size(frag),
 					   chunk));
 
-		sz = tls_rx_msg_size(strp, strp->anchor);
-		if (sz < 0) {
-			desc->error = sz;
-			return 0;
-		}
-
-		/* We may have over-read, sz == 0 is guaranteed under-read */
-		if (sz > 0)
-			chunk = min_t(size_t, chunk, sz - skb->len);
-
 		skb->len += chunk;
 		skb->data_len += chunk;
 		skb_frag_size_add(frag, chunk);
+
+		sz = tls_rx_msg_size(strp, skb);
+		if (sz < 0)
+			return sz;
+
+		/* We may have over-read, sz == 0 is guaranteed under-read */
+		if (unlikely(sz && sz < skb->len)) {
+			int over = skb->len - sz;
+
+			WARN_ON_ONCE(over > chunk);
+			skb->len -= over;
+			skb->data_len -= over;
+			skb_frag_size_add(frag, -over);
+
+			chunk -= over;
+		}
+
 		frag++;
 		len -= chunk;
 		offset += chunk;
@@ -247,15 +270,99 @@ static int tls_strp_copyin(read_descriptor_t *desc, struct sk_buff *in_skb,
 		offset += chunk;
 	}
 
-	if (strp->stm.full_len == skb->len) {
+read_done:
+	return in_len - len;
+}
+
+static int tls_strp_copyin_skb(struct tls_strparser *strp, struct sk_buff *skb,
+			       struct sk_buff *in_skb, unsigned int offset,
+			       size_t in_len)
+{
+	struct sk_buff *nskb, *first, *last;
+	struct skb_shared_info *shinfo;
+	size_t chunk;
+	int sz;
+
+	if (strp->stm.full_len)
+		chunk = strp->stm.full_len - skb->len;
+	else
+		chunk = TLS_MAX_PAYLOAD_SIZE + PAGE_SIZE;
+	chunk = min(chunk, in_len);
+
+	nskb = tls_strp_skb_copy(strp, in_skb, offset, chunk);
+	if (!nskb)
+		return -ENOMEM;
+
+	shinfo = skb_shinfo(skb);
+	if (!shinfo->frag_list) {
+		shinfo->frag_list = nskb;
+		nskb->prev = nskb;
+	} else {
+		first = shinfo->frag_list;
+		last = first->prev;
+		last->next = nskb;
+		first->prev = nskb;
+	}
+
+	skb->len += chunk;
+	skb->data_len += chunk;
+
+	if (!strp->stm.full_len) {
+		sz = tls_rx_msg_size(strp, skb);
+		if (sz < 0)
+			return sz;
+
+		/* We may have over-read, sz == 0 is guaranteed under-read */
+		if (unlikely(sz && sz < skb->len)) {
+			int over = skb->len - sz;
+
+			WARN_ON_ONCE(over > chunk);
+			skb->len -= over;
+			skb->data_len -= over;
+			__pskb_trim(nskb, nskb->len - over);
+
+			chunk -= over;
+		}
+
+		strp->stm.full_len = sz;
+	}
+
+	return chunk;
+}
+
+static int tls_strp_copyin(read_descriptor_t *desc, struct sk_buff *in_skb,
+			   unsigned int offset, size_t in_len)
+{
+	struct tls_strparser *strp = (struct tls_strparser *)desc->arg.data;
+	struct sk_buff *skb;
+	int ret;
+
+	if (strp->msg_ready)
+		return 0;
+
+	skb = strp->anchor;
+	if (!skb->len)
+		skb_copy_decrypted(skb, in_skb);
+	else
+		strp->mixed_decrypted |= !!skb_cmp_decrypted(skb, in_skb);
+
+	if (IS_ENABLED(CONFIG_TLS_DEVICE) && strp->mixed_decrypted)
+		ret = tls_strp_copyin_skb(strp, skb, in_skb, offset, in_len);
+	else
+		ret = tls_strp_copyin_frag(strp, skb, in_skb, offset, in_len);
+	if (ret < 0) {
+		desc->error = ret;
+		ret = 0;
+	}
+
+	if (strp->stm.full_len && strp->stm.full_len == skb->len) {
 		desc->count = 0;
 
 		strp->msg_ready = 1;
 		tls_rx_msg_ready(strp);
 	}
 
-read_done:
-	return in_len - len;
+	return ret;
 }
 
 static int tls_strp_read_copyin(struct tls_strparser *strp)
@@ -315,15 +422,19 @@ static int tls_strp_read_copy(struct tls_strparser *strp, bool qshort)
 	return 0;
 }
 
-static bool tls_strp_check_no_dup(struct tls_strparser *strp)
+static bool tls_strp_check_queue_ok(struct tls_strparser *strp)
 {
 	unsigned int len = strp->stm.offset + strp->stm.full_len;
-	struct sk_buff *skb;
+	struct sk_buff *first, *skb;
 	u32 seq;
 
-	skb = skb_shinfo(strp->anchor)->frag_list;
-	seq = TCP_SKB_CB(skb)->seq;
+	first = skb_shinfo(strp->anchor)->frag_list;
+	skb = first;
+	seq = TCP_SKB_CB(first)->seq;
 
+	/* Make sure there's no duplicate data in the queue,
+	 * and the decrypted status matches.
+	 */
 	while (skb->len < len) {
 		seq += skb->len;
 		len -= skb->len;
@@ -331,6 +442,8 @@ static bool tls_strp_check_no_dup(struct tls_strparser *strp)
 
 		if (TCP_SKB_CB(skb)->seq != seq)
 			return false;
+		if (skb_cmp_decrypted(first, skb))
+			return false;
 	}
 
 	return true;
@@ -411,7 +524,7 @@ static int tls_strp_read_sock(struct tls_strparser *strp)
 		return tls_strp_read_copy(strp, true);
 	}
 
-	if (!tls_strp_check_no_dup(strp))
+	if (!tls_strp_check_queue_ok(strp))
 		return tls_strp_read_copy(strp, false);
 
 	strp->msg_ready = 1;
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 635b8bf6b937..6e6a7c37d685 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -2304,10 +2304,14 @@ static void tls_data_ready(struct sock *sk)
 	struct tls_context *tls_ctx = tls_get_ctx(sk);
 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
 	struct sk_psock *psock;
+	gfp_t alloc_save;
 
 	trace_sk_data_ready(sk);
 
+	alloc_save = sk->sk_allocation;
+	sk->sk_allocation = GFP_ATOMIC;
 	tls_strp_data_ready(&ctx->strp);
+	sk->sk_allocation = alloc_save;
 
 	psock = sk_psock_get(sk);
 	if (psock) {
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index fb31e8a4409e..e7728b57a8c7 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -603,7 +603,7 @@ static void unix_release_sock(struct sock *sk, int embrion)
 	/* Clear state */
 	unix_state_lock(sk);
 	sock_orphan(sk);
-	sk->sk_shutdown = SHUTDOWN_MASK;
+	WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK);
 	path = u->path;
 	u->path.dentry = NULL;
 	u->path.mnt = NULL;
@@ -628,7 +628,7 @@ static void unix_release_sock(struct sock *sk, int embrion)
 		if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
 			unix_state_lock(skpair);
 			/* No more writes */
-			skpair->sk_shutdown = SHUTDOWN_MASK;
+			WRITE_ONCE(skpair->sk_shutdown, SHUTDOWN_MASK);
 			if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
 				WRITE_ONCE(skpair->sk_err, ECONNRESET);
 			unix_state_unlock(skpair);
@@ -1442,7 +1442,7 @@ static long unix_wait_for_peer(struct sock *other, long timeo)
 
 	sched = !sock_flag(other, SOCK_DEAD) &&
 		!(other->sk_shutdown & RCV_SHUTDOWN) &&
-		unix_recvq_full(other);
+		unix_recvq_full_lockless(other);
 
 	unix_state_unlock(other);
 
@@ -2553,7 +2553,7 @@ static int unix_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
 {
 	struct unix_sock *u = unix_sk(sk);
 	struct sk_buff *skb;
-	int err, copied;
+	int err;
 
 	mutex_lock(&u->iolock);
 	skb = skb_recv_datagram(sk, MSG_DONTWAIT, &err);
@@ -2561,10 +2561,7 @@ static int unix_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
 	if (!skb)
 		return err;
 
-	copied = recv_actor(sk, skb);
-	kfree_skb(skb);
-
-	return copied;
+	return recv_actor(sk, skb);
 }
 
 /*
@@ -3008,7 +3005,7 @@ static int unix_shutdown(struct socket *sock, int mode)
 	++mode;
 
 	unix_state_lock(sk);
-	sk->sk_shutdown |= mode;
+	WRITE_ONCE(sk->sk_shutdown, sk->sk_shutdown | mode);
 	other = unix_peer(sk);
 	if (other)
 		sock_hold(other);
@@ -3028,7 +3025,7 @@ static int unix_shutdown(struct socket *sock, int mode)
 		if (mode&SEND_SHUTDOWN)
 			peer_mode |= RCV_SHUTDOWN;
 		unix_state_lock(other);
-		other->sk_shutdown |= peer_mode;
+		WRITE_ONCE(other->sk_shutdown, other->sk_shutdown | peer_mode);
 		unix_state_unlock(other);
 		other->sk_state_change(other);
 		if (peer_mode == SHUTDOWN_MASK)
@@ -3160,16 +3157,18 @@ static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wa
 {
 	struct sock *sk = sock->sk;
 	__poll_t mask;
+	u8 shutdown;
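unix_poll() and unix_dgram_poll() below snapshot sk_shutdown once with READ_ONCE() and derive every mask bit from that local copy, so a concurrent shutdown() cannot make the EPOLLHUP and EPOLLRDHUP tests see two different values. The shape of the fix (a kernel-idiom sketch, not the full poll handler):

#include <linux/poll.h>
#include <net/sock.h>

static __poll_t shutdown_poll_bits(const struct sock *sk)
{
	u8 shutdown = READ_ONCE(sk->sk_shutdown);	/* one snapshot */
	__poll_t mask = 0;

	if (shutdown == SHUTDOWN_MASK)
		mask |= EPOLLHUP;
	if (shutdown & RCV_SHUTDOWN)
		mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
	return mask;
}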
 	sock_poll_wait(file, sock, wait);
 	mask = 0;
+	shutdown = READ_ONCE(sk->sk_shutdown);
 
 	/* exceptional events? */
 	if (READ_ONCE(sk->sk_err))
 		mask |= EPOLLERR;
-	if (sk->sk_shutdown == SHUTDOWN_MASK)
+	if (shutdown == SHUTDOWN_MASK)
 		mask |= EPOLLHUP;
-	if (sk->sk_shutdown & RCV_SHUTDOWN)
+	if (shutdown & RCV_SHUTDOWN)
 		mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
 
 	/* readable? */
@@ -3203,9 +3202,11 @@ static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
 	struct sock *sk = sock->sk, *other;
 	unsigned int writable;
 	__poll_t mask;
+	u8 shutdown;
 
 	sock_poll_wait(file, sock, wait);
 	mask = 0;
+	shutdown = READ_ONCE(sk->sk_shutdown);
 
 	/* exceptional events? */
 	if (READ_ONCE(sk->sk_err) ||
@@ -3213,9 +3214,9 @@ static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
 		mask |= EPOLLERR |
 			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
 
-	if (sk->sk_shutdown & RCV_SHUTDOWN)
+	if (shutdown & RCV_SHUTDOWN)
 		mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
-	if (sk->sk_shutdown == SHUTDOWN_MASK)
+	if (shutdown == SHUTDOWN_MASK)
 		mask |= EPOLLHUP;
 
 	/* readable? */
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index 413407bb646c..efb8a0937a13 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -1462,7 +1462,7 @@ static int vsock_connect(struct socket *sock, struct sockaddr *addr,
 			vsock_transport_cancel_pkt(vsk);
 			vsock_remove_connected(vsk);
 			goto out_wait;
-		} else if (timeout == 0) {
+		} else if ((sk->sk_state != TCP_ESTABLISHED) && (timeout == 0)) {
 			err = -ETIMEDOUT;
 			sk->sk_state = TCP_CLOSE;
 			sock->state = SS_UNCONNECTED;
diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
index e4878551f140..b769fc258931 100644
--- a/net/vmw_vsock/virtio_transport_common.c
+++ b/net/vmw_vsock/virtio_transport_common.c
@@ -1441,7 +1441,6 @@ int virtio_transport_read_skb(struct vsock_sock *vsk, skb_read_actor_t recv_acto
 	struct sock *sk = sk_vsock(vsk);
 	struct sk_buff *skb;
 	int off = 0;
-	int copied;
 	int err;
 
 	spin_lock_bh(&vvs->rx_lock);
@@ -1454,9 +1453,7 @@ int virtio_transport_read_skb(struct vsock_sock *vsk, skb_read_actor_t recv_acto
 	if (!skb)
 		return err;
 
-	copied = recv_actor(sk, skb);
-	kfree_skb(skb);
-	return copied;
+	return recv_actor(sk, skb);
 }
 EXPORT_SYMBOL_GPL(virtio_transport_read_skb);
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index a1382255fab3..c501db7bbdb3 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -5,7 +5,7 @@
 * Copyright 2008 Johannes Berg <johannes@sipsolutions.net>
 * Copyright 2013-2014 Intel Mobile Communications GmbH
 * Copyright 2016 Intel Deutschland GmbH
- * Copyright (C) 2018-2022 Intel Corporation
+ * Copyright (C) 2018-2023 Intel Corporation
 */
 #include <linux/kernel.h>
 #include <linux/slab.h>
@@ -540,6 +540,10 @@ static int cfg80211_parse_ap_info(struct cfg80211_colocated_ap *entry,
 	/* skip the TBTT offset */
 	pos++;
 
+	/* ignore entries with invalid BSSID */
+	if (!is_valid_ether_addr(pos))
+		return -EINVAL;
+
 	memcpy(entry->bssid, pos, ETH_ALEN);
 	pos += ETH_ALEN;
diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c
index bef28c6187eb..408f5e55744e 100644
--- a/net/xfrm/xfrm_device.c
+++ b/net/xfrm/xfrm_device.c
@@ -378,7 +378,7 @@ int xfrm_dev_policy_add(struct net *net, struct xfrm_policy *xp,
 		break;
 	default:
 		xdo->dev = NULL;
-		dev_put(dev);
+		netdev_put(dev, &xdo->dev_tracker);
 		NL_SET_ERR_MSG(extack, "Unrecognized offload direction");
 		return -EINVAL;
 	}
diff --git a/net/xfrm/xfrm_interface_core.c b/net/xfrm/xfrm_interface_core.c
index
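The xfrm_device.c fix above makes the error path match how the reference was taken: a device held with a reference tracker must be released with netdev_put(dev, &tracker), not bare dev_put(), or the tracker bookkeeping behind CONFIG_NET_DEV_REFCNT_TRACKER is thrown off. A kernel-idiom sketch of the paired calls (struct and names are illustrative):

#include <linux/netdevice.h>

struct offload_binding {
	struct net_device *dev;
	netdevice_tracker dev_tracker;
};

static void binding_set_dev(struct offload_binding *b, struct net_device *dev)
{
	b->dev = dev;
	netdev_hold(dev, &b->dev_tracker, GFP_KERNEL);	/* tracked ref */
}

static void binding_clear_dev(struct offload_binding *b)
{
	netdev_put(b->dev, &b->dev_tracker);	/* matches netdev_hold() */
	b->dev = NULL;
}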
35279c220bd7..1f99dc469027 100644
--- a/net/xfrm/xfrm_interface_core.c
+++ b/net/xfrm/xfrm_interface_core.c
@@ -310,52 +310,6 @@ static void xfrmi_scrub_packet(struct sk_buff *skb, bool xnet)
 	skb->mark = 0;
 }
 
-static int xfrmi_input(struct sk_buff *skb, int nexthdr, __be32 spi,
-		       int encap_type, unsigned short family)
-{
-	struct sec_path *sp;
-
-	sp = skb_sec_path(skb);
-	if (sp && (sp->len || sp->olen) &&
-	    !xfrm_policy_check(NULL, XFRM_POLICY_IN, skb, family))
-		goto discard;
-
-	XFRM_SPI_SKB_CB(skb)->family = family;
-	if (family == AF_INET) {
-		XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
-		XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
-	} else {
-		XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct ipv6hdr, daddr);
-		XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = NULL;
-	}
-
-	return xfrm_input(skb, nexthdr, spi, encap_type);
-discard:
-	kfree_skb(skb);
-	return 0;
-}
-
-static int xfrmi4_rcv(struct sk_buff *skb)
-{
-	return xfrmi_input(skb, ip_hdr(skb)->protocol, 0, 0, AF_INET);
-}
-
-static int xfrmi6_rcv(struct sk_buff *skb)
-{
-	return xfrmi_input(skb, skb_network_header(skb)[IP6CB(skb)->nhoff],
-			   0, 0, AF_INET6);
-}
-
-static int xfrmi4_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
-{
-	return xfrmi_input(skb, nexthdr, spi, encap_type, AF_INET);
-}
-
-static int xfrmi6_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
-{
-	return xfrmi_input(skb, nexthdr, spi, encap_type, AF_INET6);
-}
-
 static int xfrmi_rcv_cb(struct sk_buff *skb, int err)
 {
 	const struct xfrm_mode *inner_mode;
@@ -991,8 +945,8 @@ static struct pernet_operations xfrmi_net_ops = {
 };
 
 static struct xfrm6_protocol xfrmi_esp6_protocol __read_mostly = {
-	.handler	=	xfrmi6_rcv,
-	.input_handler	=	xfrmi6_input,
+	.handler	=	xfrm6_rcv,
+	.input_handler	=	xfrm_input,
 	.cb_handler	=	xfrmi_rcv_cb,
 	.err_handler	=	xfrmi6_err,
 	.priority	=	10,
@@ -1042,8 +996,8 @@ static struct xfrm6_tunnel xfrmi_ip6ip_handler __read_mostly = {
 #endif
 
 static struct xfrm4_protocol xfrmi_esp4_protocol __read_mostly = {
-	.handler	=	xfrmi4_rcv,
-	.input_handler	=	xfrmi4_input,
+	.handler	=	xfrm4_rcv,
+	.input_handler	=	xfrm_input,
 	.cb_handler	=	xfrmi_rcv_cb,
 	.err_handler	=	xfrmi4_err,
 	.priority	=	10,
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 5c61ec04b839..6d15788b5123 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -3312,7 +3312,7 @@ xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl)
 
 static inline int
 xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x,
-	      unsigned short family)
+	      unsigned short family, u32 if_id)
 {
 	if (xfrm_state_kern(x))
 		return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family);
@@ -3323,7 +3323,8 @@ xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x,
 		(tmpl->allalgs || (tmpl->aalgos & (1<<x->props.aalgo)) ||
 		 !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) &&
 		!(x->props.mode != XFRM_MODE_TRANSPORT &&
-		  xfrm_state_addr_cmp(tmpl, x, family));
+		  xfrm_state_addr_cmp(tmpl, x, family)) &&
+		(if_id == 0 || if_id == x->if_id);
 }
 
 /*
@@ -3335,7 +3336,7 @@ xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x,
 */
 static inline int
 xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int start,
-	       unsigned short family)
+	       unsigned short family, u32 if_id)
 {
 	int idx = start;
 
@@ -3345,7 +3346,7 @@ xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int star
 	} else
 		start = -1;
 	for (; idx < sp->len; idx++) {
-		if (xfrm_state_ok(tmpl, sp->xvec[idx], family))
+		if (xfrm_state_ok(tmpl, sp->xvec[idx], family, if_id))
 			return ++idx;
 		if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) {
 			if (start == -1)
@@ -3712,12 +3713,6 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
 		}
 		xfrm_nr = ti;
 
-		if (net->xfrm.policy_default[dir] == XFRM_USERPOLICY_BLOCK &&
-		    !xfrm_nr) {
-			XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOSTATES);
-			goto reject;
-		}
-
 		if (npols > 1) {
 			xfrm_tmpl_sort(stp, tpp, xfrm_nr, family);
 			tpp = stp;
@@ -3730,7 +3725,7 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
 		 * are implied between each two transformations.
 		 */
 		for (i = xfrm_nr-1, k = 0; i >= 0; i--) {
-			k = xfrm_policy_ok(tpp[i], sp, k, family);
+			k = xfrm_policy_ok(tpp[i], sp, k, family, if_id);
 			if (k < 0) {
 				if (k < -1)
 					/* "-2 - errored_index" returned */
@@ -3745,9 +3740,6 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
 			goto reject;
 		}
 
-		if (if_id)
-			secpath_reset(skb);
-
 		xfrm_pols_put(pols, npols);
 		return 1;
 	}
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index d720e163ae6e..c34a2a06ca94 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -1770,7 +1770,7 @@ static void copy_templates(struct xfrm_policy *xp, struct xfrm_user_tmpl *ut,
 }
 
 static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family,
-			 struct netlink_ext_ack *extack)
+			 int dir, struct netlink_ext_ack *extack)
 {
 	u16 prev_family;
 	int i;
@@ -1796,6 +1796,10 @@ static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family,
 		switch (ut[i].mode) {
 		case XFRM_MODE_TUNNEL:
 		case XFRM_MODE_BEET:
+			if (ut[i].optional && dir == XFRM_POLICY_OUT) {
+				NL_SET_ERR_MSG(extack, "Mode in optional template not allowed in outbound policy");
+				return -EINVAL;
+			}
 			break;
 		default:
 			if (ut[i].family != prev_family) {
@@ -1833,7 +1837,7 @@ static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family,
 }
 
 static int copy_from_user_tmpl(struct xfrm_policy *pol, struct nlattr **attrs,
-			       struct netlink_ext_ack *extack)
+			       int dir, struct netlink_ext_ack *extack)
 {
 	struct nlattr *rt = attrs[XFRMA_TMPL];
 
@@ -1844,7 +1848,7 @@ static int copy_from_user_tmpl(struct xfrm_policy *pol, struct nlattr **attrs,
 		int nr = nla_len(rt) / sizeof(*utmpl);
 		int err;
 
-		err = validate_tmpl(nr, utmpl, pol->family, extack);
+		err = validate_tmpl(nr, utmpl, pol->family, dir, extack);
 		if (err)
 			return err;
 
@@ -1921,7 +1925,7 @@ static struct xfrm_policy *xfrm_policy_construct(struct net *net,
 	if (err)
 		goto error;
 
-	if (!(err = copy_from_user_tmpl(xp, attrs, extack)))
+	if (!(err = copy_from_user_tmpl(xp, attrs, p->dir, extack)))
 		err = copy_from_user_sec_ctx(xp, attrs);
 	if (err)
 		goto error;
@@ -1980,6 +1984,7 @@ static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
 
 	if (err) {
 		xfrm_dev_policy_delete(xp);
+		xfrm_dev_policy_free(xp);
 		security_xfrm_policy_free(xp->security);
 		kfree(xp);
 		return err;
@@ -3499,7 +3504,7 @@ static struct xfrm_policy *xfrm_compile_policy(struct sock *sk, int opt,
 		return NULL;
 
 	nr = ((len - sizeof(*p)) / sizeof(*ut));
-	if (validate_tmpl(nr, ut, p->sel.family, NULL))
+	if (validate_tmpl(nr, ut, p->sel.family, p->dir, NULL))
 		return NULL;
 
 	if (p->dir > XFRM_POLICY_OUT)
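The xfrm_policy.c change above threads the interface id into xfrm_state_ok(): a state now matches a template only when the lookup is not interface-scoped (if_id == 0) or the ids agree. The predicate distilled (a hedged sketch, not the complete kernel check):

#include <stdbool.h>
#include <stdint.h>

/* if_id == 0 means "not scoped to an xfrm interface" */
static bool state_if_id_ok(uint32_t lookup_if_id, uint32_t state_if_id)
{
	return lookup_if_id == 0 || lookup_if_id == state_if_id;
}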