Diffstat (limited to 'net')
-rw-r--r-- net/8021q/vlan_dev.c | 30
-rw-r--r-- net/8021q/vlanproc.c | 46
-rw-r--r-- net/Kconfig | 1
-rw-r--r-- net/Makefile | 2
-rw-r--r-- net/batman-adv/distributed-arp-table.c | 3
-rw-r--r-- net/batman-adv/main.c | 14
-rw-r--r-- net/batman-adv/main.h | 2
-rw-r--r-- net/batman-adv/netlink.c | 1
-rw-r--r-- net/bluetooth/6lowpan.c | 4
-rw-r--r-- net/bluetooth/Kconfig | 8
-rw-r--r-- net/bluetooth/Makefile | 1
-rw-r--r-- net/bluetooth/a2mp.c | 1054
-rw-r--r-- net/bluetooth/a2mp.h | 154
-rw-r--r-- net/bluetooth/af_bluetooth.c | 10
-rw-r--r-- net/bluetooth/amp.c | 590
-rw-r--r-- net/bluetooth/amp.h | 60
-rw-r--r-- net/bluetooth/bnep/core.c | 5
-rw-r--r-- net/bluetooth/eir.c | 29
-rw-r--r-- net/bluetooth/hci_conn.c | 200
-rw-r--r-- net/bluetooth/hci_core.c | 170
-rw-r--r-- net/bluetooth/hci_event.c | 236
-rw-r--r-- net/bluetooth/hci_request.c | 2
-rw-r--r-- net/bluetooth/hci_sock.c | 4
-rw-r--r-- net/bluetooth/hci_sync.c | 433
-rw-r--r-- net/bluetooth/iso.c | 104
-rw-r--r-- net/bluetooth/l2cap_core.c | 1079
-rw-r--r-- net/bluetooth/l2cap_sock.c | 21
-rw-r--r-- net/bluetooth/mgmt.c | 120
-rw-r--r-- net/bluetooth/msft.c | 3
-rw-r--r-- net/bluetooth/sco.c | 3
-rw-r--r-- net/bpf/bpf_dummy_struct_ops.c | 36
-rw-r--r-- net/bpf/test_run.c | 12
-rw-r--r-- net/bridge/br.c | 15
-rw-r--r-- net/bridge/br_device.c | 27
-rw-r--r-- net/bridge/br_fdb.c | 5
-rw-r--r-- net/bridge/br_netlink.c | 3
-rw-r--r-- net/bridge/br_vlan.c | 4
-rw-r--r-- net/bridge/netfilter/Kconfig | 7
-rw-r--r-- net/bridge/netfilter/Makefile | 2
-rw-r--r-- net/can/af_can.c | 2
-rw-r--r-- net/can/bcm.c | 69
-rw-r--r-- net/can/isotp.c | 5
-rw-r--r-- net/can/raw.c | 104
-rw-r--r-- net/core/Makefile | 1
-rw-r--r-- net/core/dev.c | 362
-rw-r--r-- net/core/dev.h | 7
-rw-r--r-- net/core/dst.c | 6
-rw-r--r-- net/core/filter.c | 173
-rw-r--r-- net/core/gro.c | 40
-rw-r--r-- net/core/gro_cells.c | 3
-rw-r--r-- net/core/gso.c | 4
-rw-r--r-- net/core/hotdata.c | 22
-rw-r--r-- net/core/link_watch.c | 13
-rw-r--r-- net/core/net-procfs.c | 55
-rw-r--r-- net/core/net-sysfs.c | 137
-rw-r--r-- net/core/net_namespace.c | 33
-rw-r--r-- net/core/netdev-genl-gen.c | 12
-rw-r--r-- net/core/netdev-genl-gen.h | 2
-rw-r--r-- net/core/netdev-genl.c | 227
-rw-r--r-- net/core/page_pool.c | 64
-rw-r--r-- net/core/page_pool_user.c | 2
-rw-r--r-- net/core/rtnetlink.c | 130
-rw-r--r-- net/core/scm.c | 5
-rw-r--r-- net/core/skbuff.c | 196
-rw-r--r-- net/core/sock.c | 82
-rw-r--r-- net/core/sock_diag.c | 120
-rw-r--r-- net/core/sysctl_net_core.c | 34
-rw-r--r-- net/core/xdp.c | 15
-rw-r--r-- net/dccp/ackvec.c | 8
-rw-r--r-- net/dccp/diag.c | 1
-rw-r--r-- net/devlink/netlink_gen.c | 2
-rw-r--r-- net/dsa/dsa.c | 7
-rw-r--r-- net/dsa/tag_sja1105.c | 4
-rw-r--r-- net/dsa/user.c | 28
-rw-r--r-- net/ethtool/eee.c | 62
-rw-r--r-- net/ethtool/ioctl.c | 60
-rw-r--r-- net/ethtool/netlink.c | 14
-rw-r--r-- net/hsr/hsr_device.c | 30
-rw-r--r-- net/ieee802154/6lowpan/core.c | 3
-rw-r--r-- net/ieee802154/socket.c | 1
-rw-r--r-- net/ieee802154/sysfs.c | 2
-rw-r--r-- net/ieee802154/sysfs.h | 2
-rw-r--r-- net/ipv4/af_inet.c | 53
-rw-r--r-- net/ipv4/bpf_tcp_ca.c | 26
-rw-r--r-- net/ipv4/cipso_ipv4.c | 5
-rw-r--r-- net/ipv4/datagram.c | 2
-rw-r--r-- net/ipv4/devinet.c | 309
-rw-r--r-- net/ipv4/fib_frontend.c | 51
-rw-r--r-- net/ipv4/fib_trie.c | 6
-rw-r--r-- net/ipv4/fou_bpf.c | 4
-rw-r--r-- net/ipv4/fou_core.c | 2
-rw-r--r-- net/ipv4/gre_offload.c | 2
-rw-r--r-- net/ipv4/igmp.c | 4
-rw-r--r-- net/ipv4/inet_connection_sock.c | 7
-rw-r--r-- net/ipv4/inet_diag.c | 101
-rw-r--r-- net/ipv4/inet_hashtables.c | 3
-rw-r--r-- net/ipv4/inetpeer.c | 5
-rw-r--r-- net/ipv4/ip_gre.c | 24
-rw-r--r-- net/ipv4/ip_output.c | 3
-rw-r--r-- net/ipv4/ip_sockglue.c | 13
-rw-r--r-- net/ipv4/ip_tunnel.c | 53
-rw-r--r-- net/ipv4/ip_vti.c | 8
-rw-r--r-- net/ipv4/ipip.c | 8
-rw-r--r-- net/ipv4/ipmr.c | 13
-rw-r--r-- net/ipv4/netfilter/Kconfig | 44
-rw-r--r-- net/ipv4/netfilter/Makefile | 2
-rw-r--r-- net/ipv4/nexthop.c | 367
-rw-r--r-- net/ipv4/proc.c | 2
-rw-r--r-- net/ipv4/raw.c | 20
-rw-r--r-- net/ipv4/raw_diag.c | 1
-rw-r--r-- net/ipv4/route.c | 9
-rw-r--r-- net/ipv4/syncookies.c | 61
-rw-r--r-- net/ipv4/tcp.c | 9
-rw-r--r-- net/ipv4/tcp_ao.c | 2
-rw-r--r-- net/ipv4/tcp_bbr.c | 4
-rw-r--r-- net/ipv4/tcp_cong.c | 6
-rw-r--r-- net/ipv4/tcp_cubic.c | 4
-rw-r--r-- net/ipv4/tcp_dctcp.c | 4
-rw-r--r-- net/ipv4/tcp_diag.c | 1
-rw-r--r-- net/ipv4/tcp_input.c | 51
-rw-r--r-- net/ipv4/tcp_ipv4.c | 17
-rw-r--r-- net/ipv4/tcp_minisocks.c | 10
-rw-r--r-- net/ipv4/tcp_offload.c | 36
-rw-r--r-- net/ipv4/udp.c | 14
-rw-r--r-- net/ipv4/udp_diag.c | 2
-rw-r--r-- net/ipv4/udp_offload.c | 17
-rw-r--r-- net/ipv4/xfrm4_input.c | 2
-rw-r--r-- net/ipv6/addrconf.c | 782
-rw-r--r-- net/ipv6/af_inet6.c | 3
-rw-r--r-- net/ipv6/anycast.c | 61
-rw-r--r-- net/ipv6/calipso.c | 5
-rw-r--r-- net/ipv6/exthdrs.c | 34
-rw-r--r-- net/ipv6/fib6_rules.c | 6
-rw-r--r-- net/ipv6/inet6_hashtables.c | 8
-rw-r--r-- net/ipv6/ioam6.c | 72
-rw-r--r-- net/ipv6/ip6_fib.c | 92
-rw-r--r-- net/ipv6/ip6_gre.c | 14
-rw-r--r-- net/ipv6/ip6_input.c | 6
-rw-r--r-- net/ipv6/ip6_offload.c | 18
-rw-r--r-- net/ipv6/ip6_output.c | 12
-rw-r--r-- net/ipv6/ip6_tunnel.c | 25
-rw-r--r-- net/ipv6/ip6_vti.c | 13
-rw-r--r-- net/ipv6/ip6mr.c | 9
-rw-r--r-- net/ipv6/ipv6_sockglue.c | 8
-rw-r--r-- net/ipv6/mcast.c | 15
-rw-r--r-- net/ipv6/ndisc.c | 84
-rw-r--r-- net/ipv6/netfilter/Kconfig | 20
-rw-r--r-- net/ipv6/netfilter/Makefile | 2
-rw-r--r-- net/ipv6/netfilter/nf_conntrack_reasm.c | 4
-rw-r--r-- net/ipv6/netfilter/nf_reject_ipv6.c | 4
-rw-r--r-- net/ipv6/output_core.c | 4
-rw-r--r-- net/ipv6/raw.c | 22
-rw-r--r-- net/ipv6/reassembly.c | 4
-rw-r--r-- net/ipv6/route.c | 47
-rw-r--r-- net/ipv6/seg6_hmac.c | 8
-rw-r--r-- net/ipv6/sit.c | 27
-rw-r--r-- net/ipv6/syncookies.c | 31
-rw-r--r-- net/ipv6/tcp_ipv6.c | 39
-rw-r--r-- net/ipv6/tcpv6_offload.c | 16
-rw-r--r-- net/ipv6/udp.c | 24
-rw-r--r-- net/ipv6/udp_offload.c | 21
-rw-r--r-- net/ipv6/xfrm6_input.c | 2
-rw-r--r-- net/ipv6/xfrm6_tunnel.c | 5
-rw-r--r-- net/iucv/af_iucv.c | 10
-rw-r--r-- net/iucv/iucv.c | 15
-rw-r--r-- net/kcm/kcmsock.c | 18
-rw-r--r-- net/l2tp/l2tp_eth.c | 2
-rw-r--r-- net/l2tp/l2tp_ip.c | 2
-rw-r--r-- net/l2tp/l2tp_ppp.c | 4
-rw-r--r-- net/mac80211/Makefile | 2
-rw-r--r-- net/mac80211/agg-tx.c | 2
-rw-r--r-- net/mac80211/cfg.c | 374
-rw-r--r-- net/mac80211/chan.c | 708
-rw-r--r-- net/mac80211/debug.h | 18
-rw-r--r-- net/mac80211/debugfs.c | 3
-rw-r--r-- net/mac80211/driver-ops.c | 14
-rw-r--r-- net/mac80211/driver-ops.h | 27
-rw-r--r-- net/mac80211/ht.c | 6
-rw-r--r-- net/mac80211/ibss.c | 55
-rw-r--r-- net/mac80211/ieee80211_i.h | 205
-rw-r--r-- net/mac80211/iface.c | 36
-rw-r--r-- net/mac80211/key.c | 20
-rw-r--r-- net/mac80211/link.c | 15
-rw-r--r-- net/mac80211/main.c | 231
-rw-r--r-- net/mac80211/mesh.c | 162
-rw-r--r-- net/mac80211/mesh.h | 3
-rw-r--r-- net/mac80211/mesh_pathtbl.c | 6
-rw-r--r-- net/mac80211/mesh_plink.c | 28
-rw-r--r-- net/mac80211/mlme.c | 3040
-rw-r--r-- net/mac80211/ocb.c | 5
-rw-r--r-- net/mac80211/offchannel.c | 21
-rw-r--r-- net/mac80211/parse.c | 971
-rw-r--r-- net/mac80211/rate.c | 14
-rw-r--r-- net/mac80211/rx.c | 53
-rw-r--r-- net/mac80211/scan.c | 64
-rw-r--r-- net/mac80211/spectmgmt.c | 337
-rw-r--r-- net/mac80211/sta_info.c | 21
-rw-r--r-- net/mac80211/sta_info.h | 20
-rw-r--r-- net/mac80211/tdls.c | 73
-rw-r--r-- net/mac80211/tests/elems.c | 5
-rw-r--r-- net/mac80211/trace.h | 201
-rw-r--r-- net/mac80211/trace_msg.h | 2
-rw-r--r-- net/mac80211/tx.c | 60
-rw-r--r-- net/mac80211/util.c | 1794
-rw-r--r-- net/mac80211/vht.c | 52
-rw-r--r-- net/mac80211/wpa.c | 33
-rw-r--r-- net/mac802154/llsec.c | 18
-rw-r--r-- net/mctp/Kconfig | 1
-rw-r--r-- net/mctp/af_mctp.c | 117
-rw-r--r-- net/mctp/route.c | 105
-rw-r--r-- net/mctp/test/route-test.c | 413
-rw-r--r-- net/mctp/test/utils.c | 2
-rw-r--r-- net/mpls/af_mpls.c | 4
-rw-r--r-- net/mpls/mpls_gso.c | 3
-rw-r--r-- net/mpls/mpls_iptunnel.c | 2
-rw-r--r-- net/mptcp/diag.c | 1
-rw-r--r-- net/mptcp/mptcp_diag.c | 2
-rw-r--r-- net/mptcp/mptcp_pm_gen.c | 7
-rw-r--r-- net/mptcp/mptcp_pm_gen.h | 2
-rw-r--r-- net/mptcp/options.c | 20
-rw-r--r-- net/mptcp/pm.c | 29
-rw-r--r-- net/mptcp/pm_netlink.c | 115
-rw-r--r-- net/mptcp/pm_userspace.c | 221
-rw-r--r-- net/mptcp/protocol.c | 107
-rw-r--r-- net/mptcp/protocol.h | 93
-rw-r--r-- net/mptcp/sockopt.c | 73
-rw-r--r-- net/mptcp/subflow.c | 12
-rw-r--r-- net/mptcp/token_test.c | 7
-rw-r--r-- net/netfilter/Kconfig | 12
-rw-r--r-- net/netfilter/ipvs/ip_vs_conn.c | 4
-rw-r--r-- net/netfilter/nf_bpf_link.c | 2
-rw-r--r-- net/netfilter/nf_conncount.c | 8
-rw-r--r-- net/netfilter/nf_conntrack_bpf.c | 4
-rw-r--r-- net/netfilter/nf_conntrack_core.c | 2
-rw-r--r-- net/netfilter/nf_conntrack_expect.c | 4
-rw-r--r-- net/netfilter/nf_log.c | 9
-rw-r--r-- net/netfilter/nf_nat_bpf.c | 4
-rw-r--r-- net/netfilter/nf_queue.c | 106
-rw-r--r-- net/netfilter/nf_synproxy_core.c | 2
-rw-r--r-- net/netfilter/nf_tables_api.c | 35
-rw-r--r-- net/netfilter/nfnetlink_queue.c | 142
-rw-r--r-- net/netfilter/nft_osf.c | 11
-rw-r--r-- net/netfilter/nft_set_pipapo.c | 193
-rw-r--r-- net/netfilter/nft_set_pipapo.h | 37
-rw-r--r-- net/netfilter/nft_set_pipapo_avx2.c | 59
-rw-r--r-- net/netfilter/utils.c | 37
-rw-r--r-- net/netfilter/x_tables.c | 3
-rw-r--r-- net/netlabel/netlabel_kapi.c | 10
-rw-r--r-- net/netlink/af_netlink.c | 76
-rw-r--r-- net/netlink/af_netlink.h | 5
-rw-r--r-- net/netlink/diag.c | 3
-rw-r--r-- net/netlink/genetlink.c | 42
-rw-r--r-- net/nfc/core.c | 2
-rw-r--r-- net/nfc/hci/llc.c | 20
-rw-r--r-- net/packet/af_packet.c | 11
-rw-r--r-- net/packet/diag.c | 3
-rw-r--r-- net/rds/connection.c | 4
-rw-r--r-- net/rxrpc/af_rxrpc.c | 12
-rw-r--r-- net/rxrpc/ar-internal.h | 88
-rw-r--r-- net/rxrpc/call_event.c | 327
-rw-r--r-- net/rxrpc/call_object.c | 56
-rw-r--r-- net/rxrpc/conn_client.c | 4
-rw-r--r-- net/rxrpc/conn_event.c | 16
-rw-r--r-- net/rxrpc/conn_object.c | 4
-rw-r--r-- net/rxrpc/input.c | 116
-rw-r--r-- net/rxrpc/insecure.c | 11
-rw-r--r-- net/rxrpc/io_thread.c | 11
-rw-r--r-- net/rxrpc/local_object.c | 3
-rw-r--r-- net/rxrpc/misc.c | 8
-rw-r--r-- net/rxrpc/output.c | 441
-rw-r--r-- net/rxrpc/proc.c | 10
-rw-r--r-- net/rxrpc/protocol.h | 6
-rw-r--r-- net/rxrpc/rtt.c | 36
-rw-r--r-- net/rxrpc/rxkad.c | 57
-rw-r--r-- net/rxrpc/sendmsg.c | 63
-rw-r--r-- net/rxrpc/sysctl.c | 16
-rw-r--r-- net/rxrpc/txbuf.c | 174
-rw-r--r-- net/sched/Kconfig | 10
-rw-r--r-- net/sched/act_api.c | 2
-rw-r--r-- net/sched/act_bpf.c | 1
-rw-r--r-- net/sched/act_connmark.c | 1
-rw-r--r-- net/sched/act_csum.c | 1
-rw-r--r-- net/sched/act_ct.c | 1
-rw-r--r-- net/sched/act_ctinfo.c | 1
-rw-r--r-- net/sched/act_gact.c | 1
-rw-r--r-- net/sched/act_gate.c | 1
-rw-r--r-- net/sched/act_ife.c | 1
-rw-r--r-- net/sched/act_mirred.c | 1
-rw-r--r-- net/sched/act_mpls.c | 1
-rw-r--r-- net/sched/act_nat.c | 1
-rw-r--r-- net/sched/act_pedit.c | 3
-rw-r--r-- net/sched/act_police.c | 1
-rw-r--r-- net/sched/act_sample.c | 1
-rw-r--r-- net/sched/act_simple.c | 1
-rw-r--r-- net/sched/act_skbedit.c | 1
-rw-r--r-- net/sched/act_skbmod.c | 1
-rw-r--r-- net/sched/act_tunnel_key.c | 1
-rw-r--r-- net/sched/act_vlan.c | 1
-rw-r--r-- net/sched/cls_api.c | 2
-rw-r--r-- net/sched/cls_basic.c | 1
-rw-r--r-- net/sched/cls_bpf.c | 1
-rw-r--r-- net/sched/cls_cgroup.c | 1
-rw-r--r-- net/sched/cls_flow.c | 1
-rw-r--r-- net/sched/cls_flower.c | 1
-rw-r--r-- net/sched/cls_fw.c | 1
-rw-r--r-- net/sched/cls_matchall.c | 1
-rw-r--r-- net/sched/cls_route.c | 1
-rw-r--r-- net/sched/cls_u32.c | 1
-rw-r--r-- net/sched/sch_api.c | 4
-rw-r--r-- net/sched/sch_cake.c | 1
-rw-r--r-- net/sched/sch_cbs.c | 1
-rw-r--r-- net/sched/sch_choke.c | 1
-rw-r--r-- net/sched/sch_codel.c | 33
-rw-r--r-- net/sched/sch_drr.c | 1
-rw-r--r-- net/sched/sch_etf.c | 1
-rw-r--r-- net/sched/sch_ets.c | 1
-rw-r--r-- net/sched/sch_fq.c | 1
-rw-r--r-- net/sched/sch_fq_codel.c | 1
-rw-r--r-- net/sched/sch_generic.c | 3
-rw-r--r-- net/sched/sch_gred.c | 1
-rw-r--r-- net/sched/sch_hfsc.c | 1
-rw-r--r-- net/sched/sch_hhf.c | 1
-rw-r--r-- net/sched/sch_htb.c | 1
-rw-r--r-- net/sched/sch_ingress.c | 3
-rw-r--r-- net/sched/sch_mqprio.c | 1
-rw-r--r-- net/sched/sch_multiq.c | 1
-rw-r--r-- net/sched/sch_netem.c | 1
-rw-r--r-- net/sched/sch_pie.c | 1
-rw-r--r-- net/sched/sch_plug.c | 1
-rw-r--r-- net/sched/sch_prio.c | 1
-rw-r--r-- net/sched/sch_qfq.c | 1
-rw-r--r-- net/sched/sch_red.c | 1
-rw-r--r-- net/sched/sch_sfb.c | 1
-rw-r--r-- net/sched/sch_sfq.c | 1
-rw-r--r-- net/sched/sch_skbprio.c | 1
-rw-r--r-- net/sched/sch_taprio.c | 73
-rw-r--r-- net/sched/sch_tbf.c | 1
-rw-r--r-- net/sctp/diag.c | 1
-rw-r--r-- net/sctp/protocol.c | 10
-rw-r--r-- net/sctp/socket.c | 1
-rw-r--r-- net/smc/af_smc.c | 22
-rw-r--r-- net/smc/smc.h | 4
-rw-r--r-- net/smc/smc_clc.c | 6
-rw-r--r-- net/smc/smc_clc.h | 2
-rw-r--r-- net/smc/smc_core.c | 4
-rw-r--r-- net/smc/smc_diag.c | 1
-rw-r--r-- net/smc/smc_ism.h | 10
-rw-r--r-- net/smc/smc_pnet.c | 10
-rw-r--r-- net/socket.c | 2
-rw-r--r-- net/tipc/Kconfig | 7
-rw-r--r-- net/tipc/Makefile | 4
-rw-r--r-- net/tipc/bearer.c | 15
-rw-r--r-- net/tipc/diag.c | 1
-rw-r--r-- net/tipc/node.c | 2
-rw-r--r-- net/tipc/socket.c | 1
-rw-r--r-- net/unix/Kconfig | 5
-rw-r--r-- net/unix/Makefile | 2
-rw-r--r-- net/unix/af_unix.c | 73
-rw-r--r-- net/unix/diag.c | 1
-rw-r--r-- net/unix/garbage.c | 200
-rw-r--r-- net/unix/scm.c | 159
-rw-r--r-- net/unix/scm.h | 10
-rw-r--r-- net/vmw_vsock/diag.c | 1
-rw-r--r-- net/wireless/chan.c | 377
-rw-r--r-- net/wireless/core.h | 52
-rw-r--r-- net/wireless/mlme.c | 146
-rw-r--r-- net/wireless/nl80211.c | 389
-rw-r--r-- net/wireless/reg.c | 17
-rw-r--r-- net/wireless/scan.c | 788
-rw-r--r-- net/wireless/sme.c | 3
-rw-r--r-- net/wireless/tests/Makefile | 2
-rw-r--r-- net/wireless/tests/chan.c | 228
-rw-r--r-- net/wireless/tests/fragmentation.c | 30
-rw-r--r-- net/wireless/tests/scan.c | 277
-rw-r--r-- net/wireless/trace.h | 62
-rw-r--r-- net/wireless/util.c | 90
-rw-r--r-- net/x25/Kconfig | 2
-rw-r--r-- net/x25/af_x25.c | 4
-rw-r--r-- net/xdp/xsk.c | 5
-rw-r--r-- net/xdp/xsk_diag.c | 1
-rw-r--r-- net/xfrm/espintcp.c | 4
-rw-r--r-- net/xfrm/xfrm_input.c | 3
-rw-r--r-- net/xfrm/xfrm_interface_bpf.c | 4
-rw-r--r-- net/xfrm/xfrm_interface_core.c | 26
-rw-r--r-- net/xfrm/xfrm_policy.c | 147
-rw-r--r-- net/xfrm/xfrm_proc.c | 1
-rw-r--r-- net/xfrm/xfrm_state.c | 17
-rw-r--r-- net/xfrm/xfrm_state_bpf.c | 4
-rw-r--r-- net/xfrm/xfrm_user.c | 2
389 files changed, 13482 insertions, 11915 deletions
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 407b2335f091..39876eff51d2 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -504,28 +504,6 @@ static void vlan_dev_set_rx_mode(struct net_device *vlan_dev)
dev_uc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
}
-/*
- * vlan network devices have devices nesting below it, and are a special
- * "super class" of normal network devices; split their locks off into a
- * separate class since they always nest.
- */
-static struct lock_class_key vlan_netdev_xmit_lock_key;
-static struct lock_class_key vlan_netdev_addr_lock_key;
-
-static void vlan_dev_set_lockdep_one(struct net_device *dev,
- struct netdev_queue *txq,
- void *unused)
-{
- lockdep_set_class(&txq->_xmit_lock, &vlan_netdev_xmit_lock_key);
-}
-
-static void vlan_dev_set_lockdep_class(struct net_device *dev)
-{
- lockdep_set_class(&dev->addr_list_lock,
- &vlan_netdev_addr_lock_key);
- netdev_for_each_tx_queue(dev, vlan_dev_set_lockdep_one, NULL);
-}
-
static __be16 vlan_parse_protocol(const struct sk_buff *skb)
{
struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data);
@@ -559,7 +537,7 @@ static const struct header_ops vlan_passthru_header_ops = {
.parse_protocol = vlan_parse_protocol,
};
-static struct device_type vlan_type = {
+static const struct device_type vlan_type = {
.name = "vlan",
};
@@ -627,7 +605,7 @@ static int vlan_dev_init(struct net_device *dev)
SET_NETDEV_DEVTYPE(dev, &vlan_type);
- vlan_dev_set_lockdep_class(dev);
+ netdev_lockdep_set_classes(dev);
vlan->vlan_pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats);
if (!vlan->vlan_pcpu_stats)
@@ -784,9 +762,9 @@ static void vlan_dev_netpoll_cleanup(struct net_device *dev)
static int vlan_dev_get_iflink(const struct net_device *dev)
{
- struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
+ const struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
- return real_dev->ifindex;
+ return READ_ONCE(real_dev->ifindex);
}
static int vlan_dev_fill_forward_path(struct net_device_path_ctx *ctx,
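[Editor's note: the vlan_dev.c hunks above make three independent cleanups: the driver-local lockdep helpers give way to the generic netdev_lockdep_set_classes(), vlan_type is constified so it can live in rodata, and vlan_dev_get_iflink() now reads the lower device's ifindex with READ_ONCE(). The last change only makes sense as one half of a pattern; a minimal sketch of the paired annotations, assuming a writer elsewhere in the series publishes ifindex with WRITE_ONCE() under RTNL (names here are illustrative, not the exact kernel code):

	/* Writer side (holds RTNL): publish the new index as one store. */
	WRITE_ONCE(dev->ifindex, new_index);

	/* Reader side (lockless dump/query path): READ_ONCE() pairs with
	 * the store above and keeps the compiler from tearing or caching
	 * the load. */
	static int example_get_iflink(const struct net_device *dev)
	{
		return READ_ONCE(dev->ifindex);
	}
]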
diff --git a/net/8021q/vlanproc.c b/net/8021q/vlanproc.c
index 7825c129742a..87b959da00cd 100644
--- a/net/8021q/vlanproc.c
+++ b/net/8021q/vlanproc.c
@@ -163,48 +163,34 @@ void vlan_proc_rem_dev(struct net_device *vlandev)
* The following few functions build the content of /proc/net/vlan/config
*/
-/* start read of /proc/net/vlan/config */
-static void *vlan_seq_start(struct seq_file *seq, loff_t *pos)
- __acquires(rcu)
+static void *vlan_seq_from_index(struct seq_file *seq, loff_t *pos)
{
+ unsigned long ifindex = *pos;
struct net_device *dev;
- struct net *net = seq_file_net(seq);
- loff_t i = 1;
-
- rcu_read_lock();
- if (*pos == 0)
- return SEQ_START_TOKEN;
- for_each_netdev_rcu(net, dev) {
+ for_each_netdev_dump(seq_file_net(seq), dev, ifindex) {
if (!is_vlan_dev(dev))
continue;
-
- if (i++ == *pos)
- return dev;
+ *pos = dev->ifindex;
+ return dev;
}
+ return NULL;
+}
+
+static void *vlan_seq_start(struct seq_file *seq, loff_t *pos)
+ __acquires(rcu)
+{
+ rcu_read_lock();
+ if (*pos == 0)
+ return SEQ_START_TOKEN;
- return NULL;
+ return vlan_seq_from_index(seq, pos);
}
static void *vlan_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
- struct net_device *dev;
- struct net *net = seq_file_net(seq);
-
++*pos;
-
- dev = v;
- if (v == SEQ_START_TOKEN)
- dev = net_device_entry(&net->dev_base_head);
-
- for_each_netdev_continue_rcu(net, dev) {
- if (!is_vlan_dev(dev))
- continue;
-
- return dev;
- }
-
- return NULL;
+ return vlan_seq_from_index(seq, pos);
}
static void vlan_seq_stop(struct seq_file *seq, void *v)
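[Editor's note: the /proc/net/vlan/config iterator above switches from positional counting (re-walk the device list up to *pos on every ->start) to an ifindex cursor: *pos stores the last emitted ifindex, and for_each_netdev_dump() resumes strictly after it. Restarts become cheap lookups rather than O(n) re-walks, and device add/remove between reads cannot shift the cursor. The core of the idiom, mirroring the patch's vlan_seq_from_index() helper:

	/* *pos holds the last ifindex emitted; resume after it. */
	static void *example_from_index(struct seq_file *seq, loff_t *pos)
	{
		unsigned long ifindex = *pos;
		struct net_device *dev;

		for_each_netdev_dump(seq_file_net(seq), dev, ifindex) {
			if (!is_vlan_dev(dev))
				continue;
			*pos = dev->ifindex;	/* remember where to resume */
			return dev;
		}
		return NULL;
	}
]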
diff --git a/net/Kconfig b/net/Kconfig
index 4adc47d0c9c2..3e57ccf0da27 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -331,6 +331,7 @@ config NET_RX_BUSY_POLL
config BQL
bool
+ prompt "Enable Byte Queue Limits"
depends on SYSFS
select DQL
default y
diff --git a/net/Makefile b/net/Makefile
index b06b5539e7a6..65bb8c72a35e 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -17,7 +17,7 @@ obj-$(CONFIG_NETFILTER) += netfilter/
obj-$(CONFIG_INET) += ipv4/
obj-$(CONFIG_TLS) += tls/
obj-$(CONFIG_XFRM) += xfrm/
-obj-$(CONFIG_UNIX_SCM) += unix/
+obj-$(CONFIG_UNIX) += unix/
obj-y += ipv6/
obj-$(CONFIG_PACKET) += packet/
obj-$(CONFIG_NET_KEY) += key/
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
index 28a939d56090..4c7e85534324 100644
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@ -684,7 +684,7 @@ static bool batadv_dat_forward_data(struct batadv_priv *bat_priv,
cand = batadv_dat_select_candidates(bat_priv, ip, vid);
if (!cand)
- goto out;
+ return ret;
batadv_dbg(BATADV_DBG_DAT, bat_priv, "DHT_SEND for %pI4\n", &ip);
@@ -728,7 +728,6 @@ free_orig:
batadv_orig_node_put(cand[i].orig_node);
}
-out:
kfree(cand);
return ret;
}
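[Editor's note: the DAT hunk is a small control-flow cleanup. When batadv_dat_select_candidates() fails, nothing has been allocated yet, so the function can return directly instead of jumping to an out: label whose only effect was kfree() on a NULL pointer. The shape it adopts:

	cand = batadv_dat_select_candidates(bat_priv, ip, vid);
	if (!cand)
		return ret;	/* nothing to unwind yet: no goto needed */
]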
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index 5fc754b0b3f7..75119f1ffccc 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -691,29 +691,31 @@ int batadv_throw_uevent(struct batadv_priv *bat_priv, enum batadv_uev_type type,
"%s%s", BATADV_UEV_TYPE_VAR,
batadv_uev_type_str[type]);
if (!uevent_env[0])
- goto out;
+ goto report_error;
uevent_env[1] = kasprintf(GFP_ATOMIC,
"%s%s", BATADV_UEV_ACTION_VAR,
batadv_uev_action_str[action]);
if (!uevent_env[1])
- goto out;
+ goto free_first_env;
/* If the event is DEL, ignore the data field */
if (action != BATADV_UEV_DEL) {
uevent_env[2] = kasprintf(GFP_ATOMIC,
"%s%s", BATADV_UEV_DATA_VAR, data);
if (!uevent_env[2])
- goto out;
+ goto free_second_env;
}
ret = kobject_uevent_env(bat_kobj, KOBJ_CHANGE, uevent_env);
-out:
- kfree(uevent_env[0]);
- kfree(uevent_env[1]);
kfree(uevent_env[2]);
+free_second_env:
+ kfree(uevent_env[1]);
+free_first_env:
+ kfree(uevent_env[0]);
if (ret)
+report_error:
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"Impossible to send uevent for (%s,%s,%s) event (err: %d)\n",
batadv_uev_type_str[type],
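[Editor's note: the batadv_throw_uevent() rework replaces one catch-all out: label, which leaned on kfree(NULL) being a no-op, with a staged unwind ladder: each failed kasprintf() jumps to a label that frees exactly what was allocated before it. The final hop is legal if unusual C: report_error: labels the statement inside if (ret), so the error gotos bypass the condition and always log. A minimal sketch of the general ladder, with hypothetical names:

	int ret = -ENOMEM;
	char *a, *b;

	a = kasprintf(GFP_ATOMIC, "A=%s", val_a);
	if (!a)
		goto report;
	b = kasprintf(GFP_ATOMIC, "B=%s", val_b);
	if (!b)
		goto free_a;

	ret = do_work(a, b);	/* hypothetical consumer */

	kfree(b);
free_a:
	kfree(a);
report:
	if (ret)
		pr_err("operation failed: %d\n", ret);
]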
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index 870dcd7f1786..8ca854a75a32 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -13,7 +13,7 @@
#define BATADV_DRIVER_DEVICE "batman-adv"
#ifndef BATADV_SOURCE_VERSION
-#define BATADV_SOURCE_VERSION "2024.0"
+#define BATADV_SOURCE_VERSION "2024.1"
#endif
/* B.A.T.M.A.N. parameters */
diff --git a/net/batman-adv/netlink.c b/net/batman-adv/netlink.c
index 1f7ed9d4f6fd..0954757f0b8b 100644
--- a/net/batman-adv/netlink.c
+++ b/net/batman-adv/netlink.c
@@ -15,7 +15,6 @@
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/errno.h>
-#include <linux/export.h>
#include <linux/genetlink.h>
#include <linux/gfp.h>
#include <linux/if_ether.h>
diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c
index 4eb1b3ced0d2..27520a8a486f 100644
--- a/net/bluetooth/6lowpan.c
+++ b/net/bluetooth/6lowpan.c
@@ -572,7 +572,7 @@ static void netdev_setup(struct net_device *dev)
dev->needs_free_netdev = true;
}
-static struct device_type bt_type = {
+static const struct device_type bt_type = {
.name = "bluetooth",
};
@@ -892,7 +892,7 @@ static int bt_6lowpan_connect(bdaddr_t *addr, u8 dst_type)
chan->ops = &bt_6lowpan_chan_ops;
err = l2cap_chan_connect(chan, cpu_to_le16(L2CAP_PSM_IPSP), 0,
- addr, dst_type);
+ addr, dst_type, L2CAP_CONN_TIMEOUT);
BT_DBG("chan %p err %d", chan, err);
if (err < 0)
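[Editor's note: two unrelated tweaks in 6lowpan.c: bt_type becomes const (read-only device metadata can move to rodata), and l2cap_chan_connect() grows an explicit connection-timeout argument, with L2CAP_CONN_TIMEOUT passed here to preserve the previous default. The new parameter lets a caller pick its own deadline at the call site, roughly (value illustrative; the default is expressed in jiffies):

	err = l2cap_chan_connect(chan, cpu_to_le16(L2CAP_PSM_IPSP), 0,
				 addr, dst_type,
				 msecs_to_jiffies(5000)); /* 5 s, not the default */
]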
diff --git a/net/bluetooth/Kconfig b/net/bluetooth/Kconfig
index da7cac0a1b71..6b2b65a66700 100644
--- a/net/bluetooth/Kconfig
+++ b/net/bluetooth/Kconfig
@@ -62,14 +62,6 @@ source "net/bluetooth/cmtp/Kconfig"
source "net/bluetooth/hidp/Kconfig"
-config BT_HS
- bool "Bluetooth High Speed (HS) features"
- depends on BT_BREDR
- help
- Bluetooth High Speed includes support for off-loading
- Bluetooth connections via 802.11 (wifi) physical layer
- available with Bluetooth version 3.0 or later.
-
config BT_LE
bool "Bluetooth Low Energy (LE) features"
depends on BT
diff --git a/net/bluetooth/Makefile b/net/bluetooth/Makefile
index 141ac1fda0bf..628d448d78be 100644
--- a/net/bluetooth/Makefile
+++ b/net/bluetooth/Makefile
@@ -21,7 +21,6 @@ bluetooth-$(CONFIG_DEV_COREDUMP) += coredump.o
bluetooth-$(CONFIG_BT_BREDR) += sco.o
bluetooth-$(CONFIG_BT_LE) += iso.o
-bluetooth-$(CONFIG_BT_HS) += a2mp.o amp.o
bluetooth-$(CONFIG_BT_LEDS) += leds.o
bluetooth-$(CONFIG_BT_MSFTEXT) += msft.o
bluetooth-$(CONFIG_BT_AOSPEXT) += aosp.o
diff --git a/net/bluetooth/a2mp.c b/net/bluetooth/a2mp.c
deleted file mode 100644
index e7adb8a98cf9..000000000000
--- a/net/bluetooth/a2mp.c
+++ /dev/null
@@ -1,1054 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- Copyright (c) 2010,2011 Code Aurora Forum. All rights reserved.
- Copyright (c) 2011,2012 Intel Corp.
-
-*/
-
-#include <net/bluetooth/bluetooth.h>
-#include <net/bluetooth/hci_core.h>
-#include <net/bluetooth/l2cap.h>
-
-#include "hci_request.h"
-#include "a2mp.h"
-#include "amp.h"
-
-#define A2MP_FEAT_EXT 0x8000
-
-/* Global AMP Manager list */
-static LIST_HEAD(amp_mgr_list);
-static DEFINE_MUTEX(amp_mgr_list_lock);
-
-/* A2MP build & send command helper functions */
-static struct a2mp_cmd *__a2mp_build(u8 code, u8 ident, u16 len, void *data)
-{
- struct a2mp_cmd *cmd;
- int plen;
-
- plen = sizeof(*cmd) + len;
- cmd = kzalloc(plen, GFP_KERNEL);
- if (!cmd)
- return NULL;
-
- cmd->code = code;
- cmd->ident = ident;
- cmd->len = cpu_to_le16(len);
-
- memcpy(cmd->data, data, len);
-
- return cmd;
-}
-
-static void a2mp_send(struct amp_mgr *mgr, u8 code, u8 ident, u16 len, void *data)
-{
- struct l2cap_chan *chan = mgr->a2mp_chan;
- struct a2mp_cmd *cmd;
- u16 total_len = len + sizeof(*cmd);
- struct kvec iv;
- struct msghdr msg;
-
- cmd = __a2mp_build(code, ident, len, data);
- if (!cmd)
- return;
-
- iv.iov_base = cmd;
- iv.iov_len = total_len;
-
- memset(&msg, 0, sizeof(msg));
-
- iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &iv, 1, total_len);
-
- l2cap_chan_send(chan, &msg, total_len);
-
- kfree(cmd);
-}
-
-static u8 __next_ident(struct amp_mgr *mgr)
-{
- if (++mgr->ident == 0)
- mgr->ident = 1;
-
- return mgr->ident;
-}
-
-static struct amp_mgr *amp_mgr_lookup_by_state(u8 state)
-{
- struct amp_mgr *mgr;
-
- mutex_lock(&amp_mgr_list_lock);
- list_for_each_entry(mgr, &amp_mgr_list, list) {
- if (test_and_clear_bit(state, &mgr->state)) {
- amp_mgr_get(mgr);
- mutex_unlock(&amp_mgr_list_lock);
- return mgr;
- }
- }
- mutex_unlock(&amp_mgr_list_lock);
-
- return NULL;
-}
-
-/* hci_dev_list shall be locked */
-static void __a2mp_add_cl(struct amp_mgr *mgr, struct a2mp_cl *cl)
-{
- struct hci_dev *hdev;
- int i = 1;
-
- cl[0].id = AMP_ID_BREDR;
- cl[0].type = AMP_TYPE_BREDR;
- cl[0].status = AMP_STATUS_BLUETOOTH_ONLY;
-
- list_for_each_entry(hdev, &hci_dev_list, list) {
- if (hdev->dev_type == HCI_AMP) {
- cl[i].id = hdev->id;
- cl[i].type = hdev->amp_type;
- if (test_bit(HCI_UP, &hdev->flags))
- cl[i].status = hdev->amp_status;
- else
- cl[i].status = AMP_STATUS_POWERED_DOWN;
- i++;
- }
- }
-}
-
-/* Processing A2MP messages */
-static int a2mp_command_rej(struct amp_mgr *mgr, struct sk_buff *skb,
- struct a2mp_cmd *hdr)
-{
- struct a2mp_cmd_rej *rej = (void *) skb->data;
-
- if (le16_to_cpu(hdr->len) < sizeof(*rej))
- return -EINVAL;
-
- BT_DBG("ident %u reason %d", hdr->ident, le16_to_cpu(rej->reason));
-
- skb_pull(skb, sizeof(*rej));
-
- return 0;
-}
-
-static int a2mp_discover_req(struct amp_mgr *mgr, struct sk_buff *skb,
- struct a2mp_cmd *hdr)
-{
- struct a2mp_discov_req *req = (void *) skb->data;
- u16 len = le16_to_cpu(hdr->len);
- struct a2mp_discov_rsp *rsp;
- u16 ext_feat;
- u8 num_ctrl;
- struct hci_dev *hdev;
-
- if (len < sizeof(*req))
- return -EINVAL;
-
- skb_pull(skb, sizeof(*req));
-
- ext_feat = le16_to_cpu(req->ext_feat);
-
- BT_DBG("mtu %d efm 0x%4.4x", le16_to_cpu(req->mtu), ext_feat);
-
- /* check that packet is not broken for now */
- while (ext_feat & A2MP_FEAT_EXT) {
- if (len < sizeof(ext_feat))
- return -EINVAL;
-
- ext_feat = get_unaligned_le16(skb->data);
- BT_DBG("efm 0x%4.4x", ext_feat);
- len -= sizeof(ext_feat);
- skb_pull(skb, sizeof(ext_feat));
- }
-
- read_lock(&hci_dev_list_lock);
-
- /* at minimum the BR/EDR needs to be listed */
- num_ctrl = 1;
-
- list_for_each_entry(hdev, &hci_dev_list, list) {
- if (hdev->dev_type == HCI_AMP)
- num_ctrl++;
- }
-
- len = struct_size(rsp, cl, num_ctrl);
- rsp = kmalloc(len, GFP_ATOMIC);
- if (!rsp) {
- read_unlock(&hci_dev_list_lock);
- return -ENOMEM;
- }
-
- rsp->mtu = cpu_to_le16(L2CAP_A2MP_DEFAULT_MTU);
- rsp->ext_feat = 0;
-
- __a2mp_add_cl(mgr, rsp->cl);
-
- read_unlock(&hci_dev_list_lock);
-
- a2mp_send(mgr, A2MP_DISCOVER_RSP, hdr->ident, len, rsp);
-
- kfree(rsp);
- return 0;
-}
-
-static int a2mp_discover_rsp(struct amp_mgr *mgr, struct sk_buff *skb,
- struct a2mp_cmd *hdr)
-{
- struct a2mp_discov_rsp *rsp = (void *) skb->data;
- u16 len = le16_to_cpu(hdr->len);
- struct a2mp_cl *cl;
- u16 ext_feat;
- bool found = false;
-
- if (len < sizeof(*rsp))
- return -EINVAL;
-
- len -= sizeof(*rsp);
- skb_pull(skb, sizeof(*rsp));
-
- ext_feat = le16_to_cpu(rsp->ext_feat);
-
- BT_DBG("mtu %d efm 0x%4.4x", le16_to_cpu(rsp->mtu), ext_feat);
-
- /* check that packet is not broken for now */
- while (ext_feat & A2MP_FEAT_EXT) {
- if (len < sizeof(ext_feat))
- return -EINVAL;
-
- ext_feat = get_unaligned_le16(skb->data);
- BT_DBG("efm 0x%4.4x", ext_feat);
- len -= sizeof(ext_feat);
- skb_pull(skb, sizeof(ext_feat));
- }
-
- cl = (void *) skb->data;
- while (len >= sizeof(*cl)) {
- BT_DBG("Remote AMP id %u type %u status %u", cl->id, cl->type,
- cl->status);
-
- if (cl->id != AMP_ID_BREDR && cl->type != AMP_TYPE_BREDR) {
- struct a2mp_info_req req;
-
- found = true;
-
- memset(&req, 0, sizeof(req));
-
- req.id = cl->id;
- a2mp_send(mgr, A2MP_GETINFO_REQ, __next_ident(mgr),
- sizeof(req), &req);
- }
-
- len -= sizeof(*cl);
- cl = skb_pull(skb, sizeof(*cl));
- }
-
- /* Fall back to L2CAP init sequence */
- if (!found) {
- struct l2cap_conn *conn = mgr->l2cap_conn;
- struct l2cap_chan *chan;
-
- mutex_lock(&conn->chan_lock);
-
- list_for_each_entry(chan, &conn->chan_l, list) {
-
- BT_DBG("chan %p state %s", chan,
- state_to_string(chan->state));
-
- if (chan->scid == L2CAP_CID_A2MP)
- continue;
-
- l2cap_chan_lock(chan);
-
- if (chan->state == BT_CONNECT)
- l2cap_send_conn_req(chan);
-
- l2cap_chan_unlock(chan);
- }
-
- mutex_unlock(&conn->chan_lock);
- }
-
- return 0;
-}
-
-static int a2mp_change_notify(struct amp_mgr *mgr, struct sk_buff *skb,
- struct a2mp_cmd *hdr)
-{
- struct a2mp_cl *cl = (void *) skb->data;
-
- while (skb->len >= sizeof(*cl)) {
- BT_DBG("Controller id %u type %u status %u", cl->id, cl->type,
- cl->status);
- cl = skb_pull(skb, sizeof(*cl));
- }
-
- /* TODO send A2MP_CHANGE_RSP */
-
- return 0;
-}
-
-static void read_local_amp_info_complete(struct hci_dev *hdev, u8 status,
- u16 opcode)
-{
- BT_DBG("%s status 0x%2.2x", hdev->name, status);
-
- a2mp_send_getinfo_rsp(hdev);
-}
-
-static int a2mp_getinfo_req(struct amp_mgr *mgr, struct sk_buff *skb,
- struct a2mp_cmd *hdr)
-{
- struct a2mp_info_req *req = (void *) skb->data;
- struct hci_dev *hdev;
- struct hci_request hreq;
- int err = 0;
-
- if (le16_to_cpu(hdr->len) < sizeof(*req))
- return -EINVAL;
-
- BT_DBG("id %u", req->id);
-
- hdev = hci_dev_get(req->id);
- if (!hdev || hdev->dev_type != HCI_AMP) {
- struct a2mp_info_rsp rsp;
-
- memset(&rsp, 0, sizeof(rsp));
-
- rsp.id = req->id;
- rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
-
- a2mp_send(mgr, A2MP_GETINFO_RSP, hdr->ident, sizeof(rsp),
- &rsp);
-
- goto done;
- }
-
- set_bit(READ_LOC_AMP_INFO, &mgr->state);
- hci_req_init(&hreq, hdev);
- hci_req_add(&hreq, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
- err = hci_req_run(&hreq, read_local_amp_info_complete);
- if (err < 0)
- a2mp_send_getinfo_rsp(hdev);
-
-done:
- if (hdev)
- hci_dev_put(hdev);
-
- skb_pull(skb, sizeof(*req));
- return 0;
-}
-
-static int a2mp_getinfo_rsp(struct amp_mgr *mgr, struct sk_buff *skb,
- struct a2mp_cmd *hdr)
-{
- struct a2mp_info_rsp *rsp = (struct a2mp_info_rsp *) skb->data;
- struct a2mp_amp_assoc_req req;
- struct amp_ctrl *ctrl;
-
- if (le16_to_cpu(hdr->len) < sizeof(*rsp))
- return -EINVAL;
-
- BT_DBG("id %u status 0x%2.2x", rsp->id, rsp->status);
-
- if (rsp->status)
- return -EINVAL;
-
- ctrl = amp_ctrl_add(mgr, rsp->id);
- if (!ctrl)
- return -ENOMEM;
-
- memset(&req, 0, sizeof(req));
-
- req.id = rsp->id;
- a2mp_send(mgr, A2MP_GETAMPASSOC_REQ, __next_ident(mgr), sizeof(req),
- &req);
-
- skb_pull(skb, sizeof(*rsp));
- return 0;
-}
-
-static int a2mp_getampassoc_req(struct amp_mgr *mgr, struct sk_buff *skb,
- struct a2mp_cmd *hdr)
-{
- struct a2mp_amp_assoc_req *req = (void *) skb->data;
- struct hci_dev *hdev;
- struct amp_mgr *tmp;
-
- if (le16_to_cpu(hdr->len) < sizeof(*req))
- return -EINVAL;
-
- BT_DBG("id %u", req->id);
-
- /* Make sure that other request is not processed */
- tmp = amp_mgr_lookup_by_state(READ_LOC_AMP_ASSOC);
-
- hdev = hci_dev_get(req->id);
- if (!hdev || hdev->amp_type == AMP_TYPE_BREDR || tmp) {
- struct a2mp_amp_assoc_rsp rsp;
-
- memset(&rsp, 0, sizeof(rsp));
- rsp.id = req->id;
-
- if (tmp) {
- rsp.status = A2MP_STATUS_COLLISION_OCCURED;
- amp_mgr_put(tmp);
- } else {
- rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
- }
-
- a2mp_send(mgr, A2MP_GETAMPASSOC_RSP, hdr->ident, sizeof(rsp),
- &rsp);
-
- goto done;
- }
-
- amp_read_loc_assoc(hdev, mgr);
-
-done:
- if (hdev)
- hci_dev_put(hdev);
-
- skb_pull(skb, sizeof(*req));
- return 0;
-}
-
-static int a2mp_getampassoc_rsp(struct amp_mgr *mgr, struct sk_buff *skb,
- struct a2mp_cmd *hdr)
-{
- struct a2mp_amp_assoc_rsp *rsp = (void *) skb->data;
- u16 len = le16_to_cpu(hdr->len);
- struct hci_dev *hdev;
- struct amp_ctrl *ctrl;
- struct hci_conn *hcon;
- size_t assoc_len;
-
- if (len < sizeof(*rsp))
- return -EINVAL;
-
- assoc_len = len - sizeof(*rsp);
-
- BT_DBG("id %u status 0x%2.2x assoc len %zu", rsp->id, rsp->status,
- assoc_len);
-
- if (rsp->status)
- return -EINVAL;
-
- /* Save remote ASSOC data */
- ctrl = amp_ctrl_lookup(mgr, rsp->id);
- if (ctrl) {
- u8 *assoc;
-
- assoc = kmemdup(rsp->amp_assoc, assoc_len, GFP_KERNEL);
- if (!assoc) {
- amp_ctrl_put(ctrl);
- return -ENOMEM;
- }
-
- ctrl->assoc = assoc;
- ctrl->assoc_len = assoc_len;
- ctrl->assoc_rem_len = assoc_len;
- ctrl->assoc_len_so_far = 0;
-
- amp_ctrl_put(ctrl);
- }
-
- /* Create Phys Link */
- hdev = hci_dev_get(rsp->id);
- if (!hdev)
- return -EINVAL;
-
- hcon = phylink_add(hdev, mgr, rsp->id, true);
- if (!hcon)
- goto done;
-
- BT_DBG("Created hcon %p: loc:%u -> rem:%u", hcon, hdev->id, rsp->id);
-
- mgr->bredr_chan->remote_amp_id = rsp->id;
-
- amp_create_phylink(hdev, mgr, hcon);
-
-done:
- hci_dev_put(hdev);
- skb_pull(skb, len);
- return 0;
-}
-
-static int a2mp_createphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb,
- struct a2mp_cmd *hdr)
-{
- struct a2mp_physlink_req *req = (void *) skb->data;
- struct a2mp_physlink_rsp rsp;
- struct hci_dev *hdev;
- struct hci_conn *hcon;
- struct amp_ctrl *ctrl;
-
- if (le16_to_cpu(hdr->len) < sizeof(*req))
- return -EINVAL;
-
- BT_DBG("local_id %u, remote_id %u", req->local_id, req->remote_id);
-
- memset(&rsp, 0, sizeof(rsp));
-
- rsp.local_id = req->remote_id;
- rsp.remote_id = req->local_id;
-
- hdev = hci_dev_get(req->remote_id);
- if (!hdev || hdev->amp_type == AMP_TYPE_BREDR) {
- rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
- goto send_rsp;
- }
-
- ctrl = amp_ctrl_lookup(mgr, rsp.remote_id);
- if (!ctrl) {
- ctrl = amp_ctrl_add(mgr, rsp.remote_id);
- if (ctrl) {
- amp_ctrl_get(ctrl);
- } else {
- rsp.status = A2MP_STATUS_UNABLE_START_LINK_CREATION;
- goto send_rsp;
- }
- }
-
- if (ctrl) {
- size_t assoc_len = le16_to_cpu(hdr->len) - sizeof(*req);
- u8 *assoc;
-
- assoc = kmemdup(req->amp_assoc, assoc_len, GFP_KERNEL);
- if (!assoc) {
- amp_ctrl_put(ctrl);
- hci_dev_put(hdev);
- return -ENOMEM;
- }
-
- ctrl->assoc = assoc;
- ctrl->assoc_len = assoc_len;
- ctrl->assoc_rem_len = assoc_len;
- ctrl->assoc_len_so_far = 0;
-
- amp_ctrl_put(ctrl);
- }
-
- hcon = phylink_add(hdev, mgr, req->local_id, false);
- if (hcon) {
- amp_accept_phylink(hdev, mgr, hcon);
- rsp.status = A2MP_STATUS_SUCCESS;
- } else {
- rsp.status = A2MP_STATUS_UNABLE_START_LINK_CREATION;
- }
-
-send_rsp:
- if (hdev)
- hci_dev_put(hdev);
-
- /* Reply error now and success after HCI Write Remote AMP Assoc
- command complete with success status
- */
- if (rsp.status != A2MP_STATUS_SUCCESS) {
- a2mp_send(mgr, A2MP_CREATEPHYSLINK_RSP, hdr->ident,
- sizeof(rsp), &rsp);
- } else {
- set_bit(WRITE_REMOTE_AMP_ASSOC, &mgr->state);
- mgr->ident = hdr->ident;
- }
-
- skb_pull(skb, le16_to_cpu(hdr->len));
- return 0;
-}
-
-static int a2mp_discphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb,
- struct a2mp_cmd *hdr)
-{
- struct a2mp_physlink_req *req = (void *) skb->data;
- struct a2mp_physlink_rsp rsp;
- struct hci_dev *hdev;
- struct hci_conn *hcon;
-
- if (le16_to_cpu(hdr->len) < sizeof(*req))
- return -EINVAL;
-
- BT_DBG("local_id %u remote_id %u", req->local_id, req->remote_id);
-
- memset(&rsp, 0, sizeof(rsp));
-
- rsp.local_id = req->remote_id;
- rsp.remote_id = req->local_id;
- rsp.status = A2MP_STATUS_SUCCESS;
-
- hdev = hci_dev_get(req->remote_id);
- if (!hdev) {
- rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
- goto send_rsp;
- }
-
- hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
- &mgr->l2cap_conn->hcon->dst);
- if (!hcon) {
- bt_dev_err(hdev, "no phys link exist");
- rsp.status = A2MP_STATUS_NO_PHYSICAL_LINK_EXISTS;
- goto clean;
- }
-
- /* TODO Disconnect Phys Link here */
-
-clean:
- hci_dev_put(hdev);
-
-send_rsp:
- a2mp_send(mgr, A2MP_DISCONNPHYSLINK_RSP, hdr->ident, sizeof(rsp), &rsp);
-
- skb_pull(skb, sizeof(*req));
- return 0;
-}
-
-static inline int a2mp_cmd_rsp(struct amp_mgr *mgr, struct sk_buff *skb,
- struct a2mp_cmd *hdr)
-{
- BT_DBG("ident %u code 0x%2.2x", hdr->ident, hdr->code);
-
- skb_pull(skb, le16_to_cpu(hdr->len));
- return 0;
-}
-
-/* Handle A2MP signalling */
-static int a2mp_chan_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
-{
- struct a2mp_cmd *hdr;
- struct amp_mgr *mgr = chan->data;
- int err = 0;
-
- amp_mgr_get(mgr);
-
- while (skb->len >= sizeof(*hdr)) {
- u16 len;
-
- hdr = (void *) skb->data;
- len = le16_to_cpu(hdr->len);
-
- BT_DBG("code 0x%2.2x id %u len %u", hdr->code, hdr->ident, len);
-
- skb_pull(skb, sizeof(*hdr));
-
- if (len > skb->len || !hdr->ident) {
- err = -EINVAL;
- break;
- }
-
- mgr->ident = hdr->ident;
-
- switch (hdr->code) {
- case A2MP_COMMAND_REJ:
- a2mp_command_rej(mgr, skb, hdr);
- break;
-
- case A2MP_DISCOVER_REQ:
- err = a2mp_discover_req(mgr, skb, hdr);
- break;
-
- case A2MP_CHANGE_NOTIFY:
- err = a2mp_change_notify(mgr, skb, hdr);
- break;
-
- case A2MP_GETINFO_REQ:
- err = a2mp_getinfo_req(mgr, skb, hdr);
- break;
-
- case A2MP_GETAMPASSOC_REQ:
- err = a2mp_getampassoc_req(mgr, skb, hdr);
- break;
-
- case A2MP_CREATEPHYSLINK_REQ:
- err = a2mp_createphyslink_req(mgr, skb, hdr);
- break;
-
- case A2MP_DISCONNPHYSLINK_REQ:
- err = a2mp_discphyslink_req(mgr, skb, hdr);
- break;
-
- case A2MP_DISCOVER_RSP:
- err = a2mp_discover_rsp(mgr, skb, hdr);
- break;
-
- case A2MP_GETINFO_RSP:
- err = a2mp_getinfo_rsp(mgr, skb, hdr);
- break;
-
- case A2MP_GETAMPASSOC_RSP:
- err = a2mp_getampassoc_rsp(mgr, skb, hdr);
- break;
-
- case A2MP_CHANGE_RSP:
- case A2MP_CREATEPHYSLINK_RSP:
- case A2MP_DISCONNPHYSLINK_RSP:
- err = a2mp_cmd_rsp(mgr, skb, hdr);
- break;
-
- default:
- BT_ERR("Unknown A2MP sig cmd 0x%2.2x", hdr->code);
- err = -EINVAL;
- break;
- }
- }
-
- if (err) {
- struct a2mp_cmd_rej rej;
-
- memset(&rej, 0, sizeof(rej));
-
- rej.reason = cpu_to_le16(0);
- hdr = (void *) skb->data;
-
- BT_DBG("Send A2MP Rej: cmd 0x%2.2x err %d", hdr->code, err);
-
- a2mp_send(mgr, A2MP_COMMAND_REJ, hdr->ident, sizeof(rej),
- &rej);
- }
-
- /* Always free skb and return success error code to prevent
- from sending L2CAP Disconnect over A2MP channel */
- kfree_skb(skb);
-
- amp_mgr_put(mgr);
-
- return 0;
-}
-
-static void a2mp_chan_close_cb(struct l2cap_chan *chan)
-{
- l2cap_chan_put(chan);
-}
-
-static void a2mp_chan_state_change_cb(struct l2cap_chan *chan, int state,
- int err)
-{
- struct amp_mgr *mgr = chan->data;
-
- if (!mgr)
- return;
-
- BT_DBG("chan %p state %s", chan, state_to_string(state));
-
- chan->state = state;
-
- switch (state) {
- case BT_CLOSED:
- if (mgr)
- amp_mgr_put(mgr);
- break;
- }
-}
-
-static struct sk_buff *a2mp_chan_alloc_skb_cb(struct l2cap_chan *chan,
- unsigned long hdr_len,
- unsigned long len, int nb)
-{
- struct sk_buff *skb;
-
- skb = bt_skb_alloc(hdr_len + len, GFP_KERNEL);
- if (!skb)
- return ERR_PTR(-ENOMEM);
-
- return skb;
-}
-
-static const struct l2cap_ops a2mp_chan_ops = {
- .name = "L2CAP A2MP channel",
- .recv = a2mp_chan_recv_cb,
- .close = a2mp_chan_close_cb,
- .state_change = a2mp_chan_state_change_cb,
- .alloc_skb = a2mp_chan_alloc_skb_cb,
-
- /* Not implemented for A2MP */
- .new_connection = l2cap_chan_no_new_connection,
- .teardown = l2cap_chan_no_teardown,
- .ready = l2cap_chan_no_ready,
- .defer = l2cap_chan_no_defer,
- .resume = l2cap_chan_no_resume,
- .set_shutdown = l2cap_chan_no_set_shutdown,
- .get_sndtimeo = l2cap_chan_no_get_sndtimeo,
-};
-
-static struct l2cap_chan *a2mp_chan_open(struct l2cap_conn *conn, bool locked)
-{
- struct l2cap_chan *chan;
- int err;
-
- chan = l2cap_chan_create();
- if (!chan)
- return NULL;
-
- BT_DBG("chan %p", chan);
-
- chan->chan_type = L2CAP_CHAN_FIXED;
- chan->scid = L2CAP_CID_A2MP;
- chan->dcid = L2CAP_CID_A2MP;
- chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
- chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
- chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
-
- chan->ops = &a2mp_chan_ops;
-
- l2cap_chan_set_defaults(chan);
- chan->remote_max_tx = chan->max_tx;
- chan->remote_tx_win = chan->tx_win;
-
- chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
- chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
-
- skb_queue_head_init(&chan->tx_q);
-
- chan->mode = L2CAP_MODE_ERTM;
-
- err = l2cap_ertm_init(chan);
- if (err < 0) {
- l2cap_chan_del(chan, 0);
- return NULL;
- }
-
- chan->conf_state = 0;
-
- if (locked)
- __l2cap_chan_add(conn, chan);
- else
- l2cap_chan_add(conn, chan);
-
- chan->remote_mps = chan->omtu;
- chan->mps = chan->omtu;
-
- chan->state = BT_CONNECTED;
-
- return chan;
-}
-
-/* AMP Manager functions */
-struct amp_mgr *amp_mgr_get(struct amp_mgr *mgr)
-{
- BT_DBG("mgr %p orig refcnt %d", mgr, kref_read(&mgr->kref));
-
- kref_get(&mgr->kref);
-
- return mgr;
-}
-
-static void amp_mgr_destroy(struct kref *kref)
-{
- struct amp_mgr *mgr = container_of(kref, struct amp_mgr, kref);
-
- BT_DBG("mgr %p", mgr);
-
- mutex_lock(&amp_mgr_list_lock);
- list_del(&mgr->list);
- mutex_unlock(&amp_mgr_list_lock);
-
- amp_ctrl_list_flush(mgr);
- kfree(mgr);
-}
-
-int amp_mgr_put(struct amp_mgr *mgr)
-{
- BT_DBG("mgr %p orig refcnt %d", mgr, kref_read(&mgr->kref));
-
- return kref_put(&mgr->kref, &amp_mgr_destroy);
-}
-
-static struct amp_mgr *amp_mgr_create(struct l2cap_conn *conn, bool locked)
-{
- struct amp_mgr *mgr;
- struct l2cap_chan *chan;
-
- mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
- if (!mgr)
- return NULL;
-
- BT_DBG("conn %p mgr %p", conn, mgr);
-
- mgr->l2cap_conn = conn;
-
- chan = a2mp_chan_open(conn, locked);
- if (!chan) {
- kfree(mgr);
- return NULL;
- }
-
- mgr->a2mp_chan = chan;
- chan->data = mgr;
-
- conn->hcon->amp_mgr = mgr;
-
- kref_init(&mgr->kref);
-
- /* Remote AMP ctrl list initialization */
- INIT_LIST_HEAD(&mgr->amp_ctrls);
- mutex_init(&mgr->amp_ctrls_lock);
-
- mutex_lock(&amp_mgr_list_lock);
- list_add(&mgr->list, &amp_mgr_list);
- mutex_unlock(&amp_mgr_list_lock);
-
- return mgr;
-}
-
-struct l2cap_chan *a2mp_channel_create(struct l2cap_conn *conn,
- struct sk_buff *skb)
-{
- struct amp_mgr *mgr;
-
- if (conn->hcon->type != ACL_LINK)
- return NULL;
-
- mgr = amp_mgr_create(conn, false);
- if (!mgr) {
- BT_ERR("Could not create AMP manager");
- return NULL;
- }
-
- BT_DBG("mgr: %p chan %p", mgr, mgr->a2mp_chan);
-
- return mgr->a2mp_chan;
-}
-
-void a2mp_send_getinfo_rsp(struct hci_dev *hdev)
-{
- struct amp_mgr *mgr;
- struct a2mp_info_rsp rsp;
-
- mgr = amp_mgr_lookup_by_state(READ_LOC_AMP_INFO);
- if (!mgr)
- return;
-
- BT_DBG("%s mgr %p", hdev->name, mgr);
-
- memset(&rsp, 0, sizeof(rsp));
-
- rsp.id = hdev->id;
- rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
-
- if (hdev->amp_type != AMP_TYPE_BREDR) {
- rsp.status = 0;
- rsp.total_bw = cpu_to_le32(hdev->amp_total_bw);
- rsp.max_bw = cpu_to_le32(hdev->amp_max_bw);
- rsp.min_latency = cpu_to_le32(hdev->amp_min_latency);
- rsp.pal_cap = cpu_to_le16(hdev->amp_pal_cap);
- rsp.assoc_size = cpu_to_le16(hdev->amp_assoc_size);
- }
-
- a2mp_send(mgr, A2MP_GETINFO_RSP, mgr->ident, sizeof(rsp), &rsp);
- amp_mgr_put(mgr);
-}
-
-void a2mp_send_getampassoc_rsp(struct hci_dev *hdev, u8 status)
-{
- struct amp_mgr *mgr;
- struct amp_assoc *loc_assoc = &hdev->loc_assoc;
- struct a2mp_amp_assoc_rsp *rsp;
- size_t len;
-
- mgr = amp_mgr_lookup_by_state(READ_LOC_AMP_ASSOC);
- if (!mgr)
- return;
-
- BT_DBG("%s mgr %p", hdev->name, mgr);
-
- len = sizeof(struct a2mp_amp_assoc_rsp) + loc_assoc->len;
- rsp = kzalloc(len, GFP_KERNEL);
- if (!rsp) {
- amp_mgr_put(mgr);
- return;
- }
-
- rsp->id = hdev->id;
-
- if (status) {
- rsp->status = A2MP_STATUS_INVALID_CTRL_ID;
- } else {
- rsp->status = A2MP_STATUS_SUCCESS;
- memcpy(rsp->amp_assoc, loc_assoc->data, loc_assoc->len);
- }
-
- a2mp_send(mgr, A2MP_GETAMPASSOC_RSP, mgr->ident, len, rsp);
- amp_mgr_put(mgr);
- kfree(rsp);
-}
-
-void a2mp_send_create_phy_link_req(struct hci_dev *hdev, u8 status)
-{
- struct amp_mgr *mgr;
- struct amp_assoc *loc_assoc = &hdev->loc_assoc;
- struct a2mp_physlink_req *req;
- struct l2cap_chan *bredr_chan;
- size_t len;
-
- mgr = amp_mgr_lookup_by_state(READ_LOC_AMP_ASSOC_FINAL);
- if (!mgr)
- return;
-
- len = sizeof(*req) + loc_assoc->len;
-
- BT_DBG("%s mgr %p assoc_len %zu", hdev->name, mgr, len);
-
- req = kzalloc(len, GFP_KERNEL);
- if (!req) {
- amp_mgr_put(mgr);
- return;
- }
-
- bredr_chan = mgr->bredr_chan;
- if (!bredr_chan)
- goto clean;
-
- req->local_id = hdev->id;
- req->remote_id = bredr_chan->remote_amp_id;
- memcpy(req->amp_assoc, loc_assoc->data, loc_assoc->len);
-
- a2mp_send(mgr, A2MP_CREATEPHYSLINK_REQ, __next_ident(mgr), len, req);
-
-clean:
- amp_mgr_put(mgr);
- kfree(req);
-}
-
-void a2mp_send_create_phy_link_rsp(struct hci_dev *hdev, u8 status)
-{
- struct amp_mgr *mgr;
- struct a2mp_physlink_rsp rsp;
- struct hci_conn *hs_hcon;
-
- mgr = amp_mgr_lookup_by_state(WRITE_REMOTE_AMP_ASSOC);
- if (!mgr)
- return;
-
- memset(&rsp, 0, sizeof(rsp));
-
- hs_hcon = hci_conn_hash_lookup_state(hdev, AMP_LINK, BT_CONNECT);
- if (!hs_hcon) {
- rsp.status = A2MP_STATUS_UNABLE_START_LINK_CREATION;
- } else {
- rsp.remote_id = hs_hcon->remote_id;
- rsp.status = A2MP_STATUS_SUCCESS;
- }
-
- BT_DBG("%s mgr %p hs_hcon %p status %u", hdev->name, mgr, hs_hcon,
- status);
-
- rsp.local_id = hdev->id;
- a2mp_send(mgr, A2MP_CREATEPHYSLINK_RSP, mgr->ident, sizeof(rsp), &rsp);
- amp_mgr_put(mgr);
-}
-
-void a2mp_discover_amp(struct l2cap_chan *chan)
-{
- struct l2cap_conn *conn = chan->conn;
- struct amp_mgr *mgr = conn->hcon->amp_mgr;
- struct a2mp_discov_req req;
-
- BT_DBG("chan %p conn %p mgr %p", chan, conn, mgr);
-
- if (!mgr) {
- mgr = amp_mgr_create(conn, true);
- if (!mgr)
- return;
- }
-
- mgr->bredr_chan = chan;
-
- memset(&req, 0, sizeof(req));
-
- req.mtu = cpu_to_le16(L2CAP_A2MP_DEFAULT_MTU);
- req.ext_feat = 0;
- a2mp_send(mgr, A2MP_DISCOVER_REQ, 1, sizeof(req), &req);
-}
diff --git a/net/bluetooth/a2mp.h b/net/bluetooth/a2mp.h
deleted file mode 100644
index 2fd253a61a2a..000000000000
--- a/net/bluetooth/a2mp.h
+++ /dev/null
@@ -1,154 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- Copyright (c) 2010,2011 Code Aurora Forum. All rights reserved.
- Copyright (c) 2011,2012 Intel Corp.
-
-*/
-
-#ifndef __A2MP_H
-#define __A2MP_H
-
-#include <net/bluetooth/l2cap.h>
-
-enum amp_mgr_state {
- READ_LOC_AMP_INFO,
- READ_LOC_AMP_ASSOC,
- READ_LOC_AMP_ASSOC_FINAL,
- WRITE_REMOTE_AMP_ASSOC,
-};
-
-struct amp_mgr {
- struct list_head list;
- struct l2cap_conn *l2cap_conn;
- struct l2cap_chan *a2mp_chan;
- struct l2cap_chan *bredr_chan;
- struct kref kref;
- __u8 ident;
- __u8 handle;
- unsigned long state;
- unsigned long flags;
-
- struct list_head amp_ctrls;
- struct mutex amp_ctrls_lock;
-};
-
-struct a2mp_cmd {
- __u8 code;
- __u8 ident;
- __le16 len;
- __u8 data[];
-} __packed;
-
-/* A2MP command codes */
-#define A2MP_COMMAND_REJ 0x01
-struct a2mp_cmd_rej {
- __le16 reason;
- __u8 data[];
-} __packed;
-
-#define A2MP_DISCOVER_REQ 0x02
-struct a2mp_discov_req {
- __le16 mtu;
- __le16 ext_feat;
-} __packed;
-
-struct a2mp_cl {
- __u8 id;
- __u8 type;
- __u8 status;
-} __packed;
-
-#define A2MP_DISCOVER_RSP 0x03
-struct a2mp_discov_rsp {
- __le16 mtu;
- __le16 ext_feat;
- struct a2mp_cl cl[];
-} __packed;
-
-#define A2MP_CHANGE_NOTIFY 0x04
-#define A2MP_CHANGE_RSP 0x05
-
-#define A2MP_GETINFO_REQ 0x06
-struct a2mp_info_req {
- __u8 id;
-} __packed;
-
-#define A2MP_GETINFO_RSP 0x07
-struct a2mp_info_rsp {
- __u8 id;
- __u8 status;
- __le32 total_bw;
- __le32 max_bw;
- __le32 min_latency;
- __le16 pal_cap;
- __le16 assoc_size;
-} __packed;
-
-#define A2MP_GETAMPASSOC_REQ 0x08
-struct a2mp_amp_assoc_req {
- __u8 id;
-} __packed;
-
-#define A2MP_GETAMPASSOC_RSP 0x09
-struct a2mp_amp_assoc_rsp {
- __u8 id;
- __u8 status;
- __u8 amp_assoc[];
-} __packed;
-
-#define A2MP_CREATEPHYSLINK_REQ 0x0A
-#define A2MP_DISCONNPHYSLINK_REQ 0x0C
-struct a2mp_physlink_req {
- __u8 local_id;
- __u8 remote_id;
- __u8 amp_assoc[];
-} __packed;
-
-#define A2MP_CREATEPHYSLINK_RSP 0x0B
-#define A2MP_DISCONNPHYSLINK_RSP 0x0D
-struct a2mp_physlink_rsp {
- __u8 local_id;
- __u8 remote_id;
- __u8 status;
-} __packed;
-
-/* A2MP response status */
-#define A2MP_STATUS_SUCCESS 0x00
-#define A2MP_STATUS_INVALID_CTRL_ID 0x01
-#define A2MP_STATUS_UNABLE_START_LINK_CREATION 0x02
-#define A2MP_STATUS_NO_PHYSICAL_LINK_EXISTS 0x02
-#define A2MP_STATUS_COLLISION_OCCURED 0x03
-#define A2MP_STATUS_DISCONN_REQ_RECVD 0x04
-#define A2MP_STATUS_PHYS_LINK_EXISTS 0x05
-#define A2MP_STATUS_SECURITY_VIOLATION 0x06
-
-struct amp_mgr *amp_mgr_get(struct amp_mgr *mgr);
-
-#if IS_ENABLED(CONFIG_BT_HS)
-int amp_mgr_put(struct amp_mgr *mgr);
-struct l2cap_chan *a2mp_channel_create(struct l2cap_conn *conn,
- struct sk_buff *skb);
-void a2mp_discover_amp(struct l2cap_chan *chan);
-#else
-static inline int amp_mgr_put(struct amp_mgr *mgr)
-{
- return 0;
-}
-
-static inline struct l2cap_chan *a2mp_channel_create(struct l2cap_conn *conn,
- struct sk_buff *skb)
-{
- return NULL;
-}
-
-static inline void a2mp_discover_amp(struct l2cap_chan *chan)
-{
-}
-#endif
-
-void a2mp_send_getinfo_rsp(struct hci_dev *hdev);
-void a2mp_send_getampassoc_rsp(struct hci_dev *hdev, u8 status);
-void a2mp_send_create_phy_link_req(struct hci_dev *hdev, u8 status);
-void a2mp_send_create_phy_link_rsp(struct hci_dev *hdev, u8 status);
-
-#endif /* __A2MP_H */
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index b93464ac3517..67604ccec2f4 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -309,14 +309,11 @@ int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
if (flags & MSG_OOB)
return -EOPNOTSUPP;
- lock_sock(sk);
-
skb = skb_recv_datagram(sk, flags, &err);
if (!skb) {
if (sk->sk_shutdown & RCV_SHUTDOWN)
err = 0;
- release_sock(sk);
return err;
}
@@ -346,8 +343,6 @@ int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
skb_free_datagram(sk, skb);
- release_sock(sk);
-
if (flags & MSG_TRUNC)
copied = skblen;
@@ -570,10 +565,11 @@ int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
if (sk->sk_state == BT_LISTEN)
return -EINVAL;
- lock_sock(sk);
+ spin_lock(&sk->sk_receive_queue.lock);
skb = skb_peek(&sk->sk_receive_queue);
amount = skb ? skb->len : 0;
- release_sock(sk);
+ spin_unlock(&sk->sk_receive_queue.lock);
+
err = put_user(amount, (int __user *)arg);
break;
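[Editor's note: both af_bluetooth.c hunks shrink locking scope. skb_recv_datagram() already serializes on the receive queue's own lock, so the lock_sock()/release_sock() pair around it in bt_sock_recvmsg() added contention without adding protection; and the queue-length peek in bt_sock_ioctl() needs only the queue spinlock, under which the skb cannot be unlinked and freed, so skb->len is stable. The resulting shape of the peek, as in the hunk:

	spin_lock(&sk->sk_receive_queue.lock);
	skb = skb_peek(&sk->sk_receive_queue);
	amount = skb ? skb->len : 0;	/* skb pinned by the queue lock */
	spin_unlock(&sk->sk_receive_queue.lock);
]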
diff --git a/net/bluetooth/amp.c b/net/bluetooth/amp.c
deleted file mode 100644
index 5d698f19868c..000000000000
--- a/net/bluetooth/amp.c
+++ /dev/null
@@ -1,590 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- Copyright (c) 2011,2012 Intel Corp.
-
-*/
-
-#include <net/bluetooth/bluetooth.h>
-#include <net/bluetooth/hci.h>
-#include <net/bluetooth/hci_core.h>
-#include <crypto/hash.h>
-
-#include "hci_request.h"
-#include "a2mp.h"
-#include "amp.h"
-
-/* Remote AMP Controllers interface */
-void amp_ctrl_get(struct amp_ctrl *ctrl)
-{
- BT_DBG("ctrl %p orig refcnt %d", ctrl,
- kref_read(&ctrl->kref));
-
- kref_get(&ctrl->kref);
-}
-
-static void amp_ctrl_destroy(struct kref *kref)
-{
- struct amp_ctrl *ctrl = container_of(kref, struct amp_ctrl, kref);
-
- BT_DBG("ctrl %p", ctrl);
-
- kfree(ctrl->assoc);
- kfree(ctrl);
-}
-
-int amp_ctrl_put(struct amp_ctrl *ctrl)
-{
- BT_DBG("ctrl %p orig refcnt %d", ctrl,
- kref_read(&ctrl->kref));
-
- return kref_put(&ctrl->kref, &amp_ctrl_destroy);
-}
-
-struct amp_ctrl *amp_ctrl_add(struct amp_mgr *mgr, u8 id)
-{
- struct amp_ctrl *ctrl;
-
- ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
- if (!ctrl)
- return NULL;
-
- kref_init(&ctrl->kref);
- ctrl->id = id;
-
- mutex_lock(&mgr->amp_ctrls_lock);
- list_add(&ctrl->list, &mgr->amp_ctrls);
- mutex_unlock(&mgr->amp_ctrls_lock);
-
- BT_DBG("mgr %p ctrl %p", mgr, ctrl);
-
- return ctrl;
-}
-
-void amp_ctrl_list_flush(struct amp_mgr *mgr)
-{
- struct amp_ctrl *ctrl, *n;
-
- BT_DBG("mgr %p", mgr);
-
- mutex_lock(&mgr->amp_ctrls_lock);
- list_for_each_entry_safe(ctrl, n, &mgr->amp_ctrls, list) {
- list_del(&ctrl->list);
- amp_ctrl_put(ctrl);
- }
- mutex_unlock(&mgr->amp_ctrls_lock);
-}
-
-struct amp_ctrl *amp_ctrl_lookup(struct amp_mgr *mgr, u8 id)
-{
- struct amp_ctrl *ctrl;
-
- BT_DBG("mgr %p id %u", mgr, id);
-
- mutex_lock(&mgr->amp_ctrls_lock);
- list_for_each_entry(ctrl, &mgr->amp_ctrls, list) {
- if (ctrl->id == id) {
- amp_ctrl_get(ctrl);
- mutex_unlock(&mgr->amp_ctrls_lock);
- return ctrl;
- }
- }
- mutex_unlock(&mgr->amp_ctrls_lock);
-
- return NULL;
-}
-
-/* Physical Link interface */
-static u8 __next_handle(struct amp_mgr *mgr)
-{
- if (++mgr->handle == 0)
- mgr->handle = 1;
-
- return mgr->handle;
-}
-
-struct hci_conn *phylink_add(struct hci_dev *hdev, struct amp_mgr *mgr,
- u8 remote_id, bool out)
-{
- bdaddr_t *dst = &mgr->l2cap_conn->hcon->dst;
- struct hci_conn *hcon;
- u8 role = out ? HCI_ROLE_MASTER : HCI_ROLE_SLAVE;
-
- hcon = hci_conn_add(hdev, AMP_LINK, dst, role, __next_handle(mgr));
- if (!hcon)
- return NULL;
-
- BT_DBG("hcon %p dst %pMR", hcon, dst);
-
- hcon->state = BT_CONNECT;
- hcon->attempt++;
- hcon->remote_id = remote_id;
- hcon->amp_mgr = amp_mgr_get(mgr);
-
- return hcon;
-}
-
-/* AMP crypto key generation interface */
-static int hmac_sha256(u8 *key, u8 ksize, char *plaintext, u8 psize, u8 *output)
-{
- struct crypto_shash *tfm;
- struct shash_desc *shash;
- int ret;
-
- if (!ksize)
- return -EINVAL;
-
- tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
- if (IS_ERR(tfm)) {
- BT_DBG("crypto_alloc_ahash failed: err %ld", PTR_ERR(tfm));
- return PTR_ERR(tfm);
- }
-
- ret = crypto_shash_setkey(tfm, key, ksize);
- if (ret) {
- BT_DBG("crypto_ahash_setkey failed: err %d", ret);
- goto failed;
- }
-
- shash = kzalloc(sizeof(*shash) + crypto_shash_descsize(tfm),
- GFP_KERNEL);
- if (!shash) {
- ret = -ENOMEM;
- goto failed;
- }
-
- shash->tfm = tfm;
-
- ret = crypto_shash_digest(shash, plaintext, psize, output);
-
- kfree(shash);
-
-failed:
- crypto_free_shash(tfm);
- return ret;
-}
-
-int phylink_gen_key(struct hci_conn *conn, u8 *data, u8 *len, u8 *type)
-{
- struct hci_dev *hdev = conn->hdev;
- struct link_key *key;
- u8 keybuf[HCI_AMP_LINK_KEY_SIZE];
- u8 gamp_key[HCI_AMP_LINK_KEY_SIZE];
- int err;
-
- if (!hci_conn_check_link_mode(conn))
- return -EACCES;
-
- BT_DBG("conn %p key_type %d", conn, conn->key_type);
-
- /* Legacy key */
- if (conn->key_type < 3) {
- bt_dev_err(hdev, "legacy key type %u", conn->key_type);
- return -EACCES;
- }
-
- *type = conn->key_type;
- *len = HCI_AMP_LINK_KEY_SIZE;
-
- key = hci_find_link_key(hdev, &conn->dst);
- if (!key) {
- BT_DBG("No Link key for conn %p dst %pMR", conn, &conn->dst);
- return -EACCES;
- }
-
- /* BR/EDR Link Key concatenated together with itself */
- memcpy(&keybuf[0], key->val, HCI_LINK_KEY_SIZE);
- memcpy(&keybuf[HCI_LINK_KEY_SIZE], key->val, HCI_LINK_KEY_SIZE);
-
- /* Derive Generic AMP Link Key (gamp) */
- err = hmac_sha256(keybuf, HCI_AMP_LINK_KEY_SIZE, "gamp", 4, gamp_key);
- if (err) {
- bt_dev_err(hdev, "could not derive Generic AMP Key: err %d", err);
- return err;
- }
-
- if (conn->key_type == HCI_LK_DEBUG_COMBINATION) {
- BT_DBG("Use Generic AMP Key (gamp)");
- memcpy(data, gamp_key, HCI_AMP_LINK_KEY_SIZE);
- return err;
- }
-
- /* Derive Dedicated AMP Link Key: "802b" is 802.11 PAL keyID */
- return hmac_sha256(gamp_key, HCI_AMP_LINK_KEY_SIZE, "802b", 4, data);
-}
-
-static void read_local_amp_assoc_complete(struct hci_dev *hdev, u8 status,
- u16 opcode, struct sk_buff *skb)
-{
- struct hci_rp_read_local_amp_assoc *rp = (void *)skb->data;
- struct amp_assoc *assoc = &hdev->loc_assoc;
- size_t rem_len, frag_len;
-
- BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
-
- if (rp->status)
- goto send_rsp;
-
- frag_len = skb->len - sizeof(*rp);
- rem_len = __le16_to_cpu(rp->rem_len);
-
- if (rem_len > frag_len) {
- BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);
-
- memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
- assoc->offset += frag_len;
-
- /* Read other fragments */
- amp_read_loc_assoc_frag(hdev, rp->phy_handle);
-
- return;
- }
-
- memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
- assoc->len = assoc->offset + rem_len;
- assoc->offset = 0;
-
-send_rsp:
- /* Send A2MP Rsp when all fragments are received */
- a2mp_send_getampassoc_rsp(hdev, rp->status);
- a2mp_send_create_phy_link_req(hdev, rp->status);
-}
-
-void amp_read_loc_assoc_frag(struct hci_dev *hdev, u8 phy_handle)
-{
- struct hci_cp_read_local_amp_assoc cp;
- struct amp_assoc *loc_assoc = &hdev->loc_assoc;
- struct hci_request req;
- int err;
-
- BT_DBG("%s handle %u", hdev->name, phy_handle);
-
- cp.phy_handle = phy_handle;
- cp.max_len = cpu_to_le16(hdev->amp_assoc_size);
- cp.len_so_far = cpu_to_le16(loc_assoc->offset);
-
- hci_req_init(&req, hdev);
- hci_req_add(&req, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp);
- err = hci_req_run_skb(&req, read_local_amp_assoc_complete);
- if (err < 0)
- a2mp_send_getampassoc_rsp(hdev, A2MP_STATUS_INVALID_CTRL_ID);
-}
-
-void amp_read_loc_assoc(struct hci_dev *hdev, struct amp_mgr *mgr)
-{
- struct hci_cp_read_local_amp_assoc cp;
- struct hci_request req;
- int err;
-
- memset(&hdev->loc_assoc, 0, sizeof(struct amp_assoc));
- memset(&cp, 0, sizeof(cp));
-
- cp.max_len = cpu_to_le16(hdev->amp_assoc_size);
-
- set_bit(READ_LOC_AMP_ASSOC, &mgr->state);
- hci_req_init(&req, hdev);
- hci_req_add(&req, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp);
- err = hci_req_run_skb(&req, read_local_amp_assoc_complete);
- if (err < 0)
- a2mp_send_getampassoc_rsp(hdev, A2MP_STATUS_INVALID_CTRL_ID);
-}
-
-void amp_read_loc_assoc_final_data(struct hci_dev *hdev,
- struct hci_conn *hcon)
-{
- struct hci_cp_read_local_amp_assoc cp;
- struct amp_mgr *mgr = hcon->amp_mgr;
- struct hci_request req;
- int err;
-
- if (!mgr)
- return;
-
- cp.phy_handle = hcon->handle;
- cp.len_so_far = cpu_to_le16(0);
- cp.max_len = cpu_to_le16(hdev->amp_assoc_size);
-
- set_bit(READ_LOC_AMP_ASSOC_FINAL, &mgr->state);
-
- /* Read Local AMP Assoc final link information data */
- hci_req_init(&req, hdev);
- hci_req_add(&req, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp);
- err = hci_req_run_skb(&req, read_local_amp_assoc_complete);
- if (err < 0)
- a2mp_send_getampassoc_rsp(hdev, A2MP_STATUS_INVALID_CTRL_ID);
-}
-
-static void write_remote_amp_assoc_complete(struct hci_dev *hdev, u8 status,
- u16 opcode, struct sk_buff *skb)
-{
- struct hci_rp_write_remote_amp_assoc *rp = (void *)skb->data;
-
- BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
- hdev->name, rp->status, rp->phy_handle);
-
- if (rp->status)
- return;
-
- amp_write_rem_assoc_continue(hdev, rp->phy_handle);
-}
-
-/* Write AMP Assoc data fragments, returns true with last fragment written*/
-static bool amp_write_rem_assoc_frag(struct hci_dev *hdev,
- struct hci_conn *hcon)
-{
- struct hci_cp_write_remote_amp_assoc *cp;
- struct amp_mgr *mgr = hcon->amp_mgr;
- struct amp_ctrl *ctrl;
- struct hci_request req;
- u16 frag_len, len;
-
- ctrl = amp_ctrl_lookup(mgr, hcon->remote_id);
- if (!ctrl)
- return false;
-
- if (!ctrl->assoc_rem_len) {
- BT_DBG("all fragments are written");
- ctrl->assoc_rem_len = ctrl->assoc_len;
- ctrl->assoc_len_so_far = 0;
-
- amp_ctrl_put(ctrl);
- return true;
- }
-
- frag_len = min_t(u16, 248, ctrl->assoc_rem_len);
- len = frag_len + sizeof(*cp);
-
- cp = kzalloc(len, GFP_KERNEL);
- if (!cp) {
- amp_ctrl_put(ctrl);
- return false;
- }
-
- BT_DBG("hcon %p ctrl %p frag_len %u assoc_len %u rem_len %u",
- hcon, ctrl, frag_len, ctrl->assoc_len, ctrl->assoc_rem_len);
-
- cp->phy_handle = hcon->handle;
- cp->len_so_far = cpu_to_le16(ctrl->assoc_len_so_far);
- cp->rem_len = cpu_to_le16(ctrl->assoc_rem_len);
- memcpy(cp->frag, ctrl->assoc, frag_len);
-
- ctrl->assoc_len_so_far += frag_len;
- ctrl->assoc_rem_len -= frag_len;
-
- amp_ctrl_put(ctrl);
-
- hci_req_init(&req, hdev);
- hci_req_add(&req, HCI_OP_WRITE_REMOTE_AMP_ASSOC, len, cp);
- hci_req_run_skb(&req, write_remote_amp_assoc_complete);
-
- kfree(cp);
-
- return false;
-}
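The transmit side above slices the assoc blob into fragments of at most 248 bytes (the payload room left in the HCI command) and tracks progress via len_so_far/rem_len. A compact model of just the slicing, under the same 248-byte cap:

    #include <stddef.h>
    #include <stdint.h>

    #define MAX_FRAG 248    /* mirrors the min_t(u16, 248, ...) cap above */

    struct assoc_tx {
            const uint8_t *assoc;
            uint16_t len_so_far;
            uint16_t rem_len;
    };

    /* Advance one fragment; returns its size, 0 once everything is written */
    static uint16_t assoc_tx_next(struct assoc_tx *tx, const uint8_t **frag)
    {
            uint16_t frag_len = tx->rem_len < MAX_FRAG ? tx->rem_len : MAX_FRAG;

            if (!frag_len)
                    return 0;

            *frag = tx->assoc + tx->len_so_far;
            tx->len_so_far += frag_len;
            tx->rem_len -= frag_len;
            return frag_len;
    }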
-
-void amp_write_rem_assoc_continue(struct hci_dev *hdev, u8 handle)
-{
- struct hci_conn *hcon;
-
- BT_DBG("%s phy handle 0x%2.2x", hdev->name, handle);
-
- hcon = hci_conn_hash_lookup_handle(hdev, handle);
- if (!hcon)
- return;
-
- /* Send A2MP create phylink rsp when all fragments are written */
- if (amp_write_rem_assoc_frag(hdev, hcon))
- a2mp_send_create_phy_link_rsp(hdev, 0);
-}
-
-void amp_write_remote_assoc(struct hci_dev *hdev, u8 handle)
-{
- struct hci_conn *hcon;
-
- BT_DBG("%s phy handle 0x%2.2x", hdev->name, handle);
-
- hcon = hci_conn_hash_lookup_handle(hdev, handle);
- if (!hcon)
- return;
-
- BT_DBG("%s phy handle 0x%2.2x hcon %p", hdev->name, handle, hcon);
-
- amp_write_rem_assoc_frag(hdev, hcon);
-}
-
-static void create_phylink_complete(struct hci_dev *hdev, u8 status,
- u16 opcode)
-{
- struct hci_cp_create_phy_link *cp;
-
- BT_DBG("%s status 0x%2.2x", hdev->name, status);
-
- cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
- if (!cp)
- return;
-
- hci_dev_lock(hdev);
-
- if (status) {
- struct hci_conn *hcon;
-
- hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
- if (hcon)
- hci_conn_del(hcon);
- } else {
- amp_write_remote_assoc(hdev, cp->phy_handle);
- }
-
- hci_dev_unlock(hdev);
-}
-
-void amp_create_phylink(struct hci_dev *hdev, struct amp_mgr *mgr,
- struct hci_conn *hcon)
-{
- struct hci_cp_create_phy_link cp;
- struct hci_request req;
-
- cp.phy_handle = hcon->handle;
-
- BT_DBG("%s hcon %p phy handle 0x%2.2x", hdev->name, hcon,
- hcon->handle);
-
- if (phylink_gen_key(mgr->l2cap_conn->hcon, cp.key, &cp.key_len,
- &cp.key_type)) {
- BT_DBG("Cannot create link key");
- return;
- }
-
- hci_req_init(&req, hdev);
- hci_req_add(&req, HCI_OP_CREATE_PHY_LINK, sizeof(cp), &cp);
- hci_req_run(&req, create_phylink_complete);
-}
-
-static void accept_phylink_complete(struct hci_dev *hdev, u8 status,
- u16 opcode)
-{
- struct hci_cp_accept_phy_link *cp;
-
- BT_DBG("%s status 0x%2.2x", hdev->name, status);
-
- if (status)
- return;
-
- cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
- if (!cp)
- return;
-
- amp_write_remote_assoc(hdev, cp->phy_handle);
-}
-
-void amp_accept_phylink(struct hci_dev *hdev, struct amp_mgr *mgr,
- struct hci_conn *hcon)
-{
- struct hci_cp_accept_phy_link cp;
- struct hci_request req;
-
- cp.phy_handle = hcon->handle;
-
- BT_DBG("%s hcon %p phy handle 0x%2.2x", hdev->name, hcon,
- hcon->handle);
-
- if (phylink_gen_key(mgr->l2cap_conn->hcon, cp.key, &cp.key_len,
- &cp.key_type)) {
- BT_DBG("Cannot create link key");
- return;
- }
-
- hci_req_init(&req, hdev);
- hci_req_add(&req, HCI_OP_ACCEPT_PHY_LINK, sizeof(cp), &cp);
- hci_req_run(&req, accept_phylink_complete);
-}
-
-void amp_physical_cfm(struct hci_conn *bredr_hcon, struct hci_conn *hs_hcon)
-{
- struct hci_dev *bredr_hdev = hci_dev_hold(bredr_hcon->hdev);
- struct amp_mgr *mgr = hs_hcon->amp_mgr;
- struct l2cap_chan *bredr_chan;
-
- BT_DBG("bredr_hcon %p hs_hcon %p mgr %p", bredr_hcon, hs_hcon, mgr);
-
- if (!bredr_hdev || !mgr || !mgr->bredr_chan)
- return;
-
- bredr_chan = mgr->bredr_chan;
-
- l2cap_chan_lock(bredr_chan);
-
- set_bit(FLAG_EFS_ENABLE, &bredr_chan->flags);
- bredr_chan->remote_amp_id = hs_hcon->remote_id;
- bredr_chan->local_amp_id = hs_hcon->hdev->id;
- bredr_chan->hs_hcon = hs_hcon;
- bredr_chan->conn->mtu = hs_hcon->hdev->block_mtu;
-
- __l2cap_physical_cfm(bredr_chan, 0);
-
- l2cap_chan_unlock(bredr_chan);
-
- hci_dev_put(bredr_hdev);
-}
-
-void amp_create_logical_link(struct l2cap_chan *chan)
-{
- struct hci_conn *hs_hcon = chan->hs_hcon;
- struct hci_cp_create_accept_logical_link cp;
- struct hci_dev *hdev;
-
- BT_DBG("chan %p hs_hcon %p dst %pMR", chan, hs_hcon,
- &chan->conn->hcon->dst);
-
- if (!hs_hcon)
- return;
-
- hdev = hci_dev_hold(chan->hs_hcon->hdev);
- if (!hdev)
- return;
-
- cp.phy_handle = hs_hcon->handle;
-
- cp.tx_flow_spec.id = chan->local_id;
- cp.tx_flow_spec.stype = chan->local_stype;
- cp.tx_flow_spec.msdu = cpu_to_le16(chan->local_msdu);
- cp.tx_flow_spec.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
- cp.tx_flow_spec.acc_lat = cpu_to_le32(chan->local_acc_lat);
- cp.tx_flow_spec.flush_to = cpu_to_le32(chan->local_flush_to);
-
- cp.rx_flow_spec.id = chan->remote_id;
- cp.rx_flow_spec.stype = chan->remote_stype;
- cp.rx_flow_spec.msdu = cpu_to_le16(chan->remote_msdu);
- cp.rx_flow_spec.sdu_itime = cpu_to_le32(chan->remote_sdu_itime);
- cp.rx_flow_spec.acc_lat = cpu_to_le32(chan->remote_acc_lat);
- cp.rx_flow_spec.flush_to = cpu_to_le32(chan->remote_flush_to);
-
- if (hs_hcon->out)
- hci_send_cmd(hdev, HCI_OP_CREATE_LOGICAL_LINK, sizeof(cp),
- &cp);
- else
- hci_send_cmd(hdev, HCI_OP_ACCEPT_LOGICAL_LINK, sizeof(cp),
- &cp);
-
- hci_dev_put(hdev);
-}
-
-void amp_disconnect_logical_link(struct hci_chan *hchan)
-{
- struct hci_conn *hcon = hchan->conn;
- struct hci_cp_disconn_logical_link cp;
-
- if (hcon->state != BT_CONNECTED) {
- BT_DBG("hchan %p not connected", hchan);
- return;
- }
-
- cp.log_handle = cpu_to_le16(hchan->handle);
- hci_send_cmd(hcon->hdev, HCI_OP_DISCONN_LOGICAL_LINK, sizeof(cp), &cp);
-}
-
-void amp_destroy_logical_link(struct hci_chan *hchan, u8 reason)
-{
- BT_DBG("hchan %p", hchan);
-
- hci_chan_del(hchan);
-}
diff --git a/net/bluetooth/amp.h b/net/bluetooth/amp.h
deleted file mode 100644
index 97c87abd129f..000000000000
--- a/net/bluetooth/amp.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- Copyright (c) 2011,2012 Intel Corp.
-
-*/
-
-#ifndef __AMP_H
-#define __AMP_H
-
-struct amp_ctrl {
- struct list_head list;
- struct kref kref;
- __u8 id;
- __u16 assoc_len_so_far;
- __u16 assoc_rem_len;
- __u16 assoc_len;
- __u8 *assoc;
-};
-
-int amp_ctrl_put(struct amp_ctrl *ctrl);
-void amp_ctrl_get(struct amp_ctrl *ctrl);
-struct amp_ctrl *amp_ctrl_add(struct amp_mgr *mgr, u8 id);
-struct amp_ctrl *amp_ctrl_lookup(struct amp_mgr *mgr, u8 id);
-void amp_ctrl_list_flush(struct amp_mgr *mgr);
-
-struct hci_conn *phylink_add(struct hci_dev *hdev, struct amp_mgr *mgr,
- u8 remote_id, bool out);
-
-int phylink_gen_key(struct hci_conn *hcon, u8 *data, u8 *len, u8 *type);
-
-void amp_read_loc_assoc_frag(struct hci_dev *hdev, u8 phy_handle);
-void amp_read_loc_assoc(struct hci_dev *hdev, struct amp_mgr *mgr);
-void amp_read_loc_assoc_final_data(struct hci_dev *hdev,
- struct hci_conn *hcon);
-void amp_create_phylink(struct hci_dev *hdev, struct amp_mgr *mgr,
- struct hci_conn *hcon);
-void amp_accept_phylink(struct hci_dev *hdev, struct amp_mgr *mgr,
- struct hci_conn *hcon);
-
-#if IS_ENABLED(CONFIG_BT_HS)
-void amp_create_logical_link(struct l2cap_chan *chan);
-void amp_disconnect_logical_link(struct hci_chan *hchan);
-#else
-static inline void amp_create_logical_link(struct l2cap_chan *chan)
-{
-}
-
-static inline void amp_disconnect_logical_link(struct hci_chan *hchan)
-{
-}
-#endif
-
-void amp_write_remote_assoc(struct hci_dev *hdev, u8 handle);
-void amp_write_rem_assoc_continue(struct hci_dev *hdev, u8 handle);
-void amp_physical_cfm(struct hci_conn *bredr_hcon, struct hci_conn *hs_hcon);
-void amp_create_logical_link(struct l2cap_chan *chan);
-void amp_disconnect_logical_link(struct hci_chan *hchan);
-void amp_destroy_logical_link(struct hci_chan *hchan, u8 reason);
-
-#endif /* __AMP_H */
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index 5a6a49885ab6..ec45f77fce21 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -385,7 +385,8 @@ static int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb)
case BNEP_COMPRESSED_DST_ONLY:
__skb_put_data(nskb, skb_mac_header(skb), ETH_ALEN);
- __skb_put_data(nskb, s->eh.h_source, ETH_ALEN + 2);
+ __skb_put_data(nskb, s->eh.h_source, ETH_ALEN);
+ put_unaligned(s->eh.h_proto, (__be16 *)__skb_put(nskb, 2));
break;
case BNEP_GENERAL:
@@ -549,7 +550,7 @@ static struct device *bnep_get_device(struct bnep_session *session)
return &conn->hcon->dev;
}
-static struct device_type bnep_type = {
+static const struct device_type bnep_type = {
.name = "bluetooth",
};
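The first bnep hunk above stops copying ETH_ALEN + 2 bytes out of h_source, a copy that silently reached into the adjacent h_proto field of struct ethhdr and that field-granular memcpy() bounds checking (FORTIFY_SOURCE) would flag. Copying the address and the 16-bit protocol separately removes the layout assumption. In portable C the unaligned 16-bit store is just a memcpy, which is the guarantee put_unaligned() provides:

    #include <stdint.h>
    #include <string.h>

    /* Store a 16-bit value at a possibly unaligned destination; memcpy
     * never assumes alignment, matching put_unaligned() semantics.
     */
    static inline void store16_unaligned(void *dst, uint16_t v)
    {
            memcpy(dst, &v, sizeof(v));
    }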
diff --git a/net/bluetooth/eir.c b/net/bluetooth/eir.c
index 9214189279e8..1bc51e2b05a3 100644
--- a/net/bluetooth/eir.c
+++ b/net/bluetooth/eir.c
@@ -13,48 +13,33 @@
#define PNP_INFO_SVCLASS_ID 0x1200
-static u8 eir_append_name(u8 *eir, u16 eir_len, u8 type, u8 *data, u8 data_len)
-{
- u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];
-
- /* If data is already NULL terminated just pass it directly */
- if (data[data_len - 1] == '\0')
- return eir_append_data(eir, eir_len, type, data, data_len);
-
- memcpy(name, data, HCI_MAX_SHORT_NAME_LENGTH);
- name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';
-
- return eir_append_data(eir, eir_len, type, name, sizeof(name));
-}
-
u8 eir_append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
size_t short_len;
size_t complete_len;
- /* no space left for name (+ NULL + type + len) */
- if ((max_adv_len(hdev) - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
+ /* no space left for name (+ type + len) */
+ if ((max_adv_len(hdev) - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 2)
return ad_len;
/* use complete name if present and fits */
complete_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name));
if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
- return eir_append_name(ptr, ad_len, EIR_NAME_COMPLETE,
- hdev->dev_name, complete_len + 1);
+ return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
+ hdev->dev_name, complete_len);
/* use short name if present */
short_len = strnlen(hdev->short_name, sizeof(hdev->short_name));
if (short_len)
- return eir_append_name(ptr, ad_len, EIR_NAME_SHORT,
+ return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
hdev->short_name,
- short_len == HCI_MAX_SHORT_NAME_LENGTH ?
- short_len : short_len + 1);
+ short_len);
/* use shortened full name if present, we already know that name
	 * is longer than HCI_MAX_SHORT_NAME_LENGTH
*/
if (complete_len)
- return eir_append_name(ptr, ad_len, EIR_NAME_SHORT,
+ return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
hdev->dev_name,
HCI_MAX_SHORT_NAME_LENGTH);
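Dropping the NUL terminators in eir.c works because EIR/AD elements are length-prefixed <len><type><data> records, not C strings; the old code spent one byte of scarce advertising space on a '\0' (hence the old + 3 headroom check, now + 2). A sketch of the record layout eir_append_data() is assumed to follow, where the returned value is the new total length:

    #include <stdint.h>
    #include <string.h>

    /* Append one AD element; the len byte covers the type byte plus the
     * payload. No buffer bounds checks: illustration only.
     */
    static uint16_t ad_append(uint8_t *buf, uint16_t off, uint8_t type,
                              const void *data, uint8_t data_len)
    {
            buf[off++] = data_len + 1;
            buf[off++] = type;
            memcpy(buf + off, data, data_len);
            return off + data_len;
    }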
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index a41d2693f4d8..3ad74f76983b 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -1,7 +1,7 @@
/*
BlueZ - Bluetooth protocol stack for Linux
Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
- Copyright 2023 NXP
+ Copyright 2023-2024 NXP
Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
@@ -36,7 +36,6 @@
#include "hci_request.h"
#include "smp.h"
-#include "a2mp.h"
#include "eir.h"
struct sco_param {
@@ -69,7 +68,7 @@ static const struct sco_param esco_param_msbc[] = {
};
/* This function requires the caller holds hdev->lock */
-static void hci_connect_le_scan_cleanup(struct hci_conn *conn, u8 status)
+void hci_connect_le_scan_cleanup(struct hci_conn *conn, u8 status)
{
struct hci_conn_params *params;
struct hci_dev *hdev = conn->hdev;
@@ -179,64 +178,6 @@ static void hci_conn_cleanup(struct hci_conn *conn)
hci_dev_put(hdev);
}
-static void hci_acl_create_connection(struct hci_conn *conn)
-{
- struct hci_dev *hdev = conn->hdev;
- struct inquiry_entry *ie;
- struct hci_cp_create_conn cp;
-
- BT_DBG("hcon %p", conn);
-
- /* Many controllers disallow HCI Create Connection while it is doing
- * HCI Inquiry. So we cancel the Inquiry first before issuing HCI Create
- * Connection. This may cause the MGMT discovering state to become false
- * without user space's request but it is okay since the MGMT Discovery
- * APIs do not promise that discovery should be done forever. Instead,
- * the user space monitors the status of MGMT discovering and it may
- * request for discovery again when this flag becomes false.
- */
- if (test_bit(HCI_INQUIRY, &hdev->flags)) {
- /* Put this connection to "pending" state so that it will be
- * executed after the inquiry cancel command complete event.
- */
- conn->state = BT_CONNECT2;
- hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
- return;
- }
-
- conn->state = BT_CONNECT;
- conn->out = true;
- conn->role = HCI_ROLE_MASTER;
-
- conn->attempt++;
-
- conn->link_policy = hdev->link_policy;
-
- memset(&cp, 0, sizeof(cp));
- bacpy(&cp.bdaddr, &conn->dst);
- cp.pscan_rep_mode = 0x02;
-
- ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
- if (ie) {
- if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
- cp.pscan_rep_mode = ie->data.pscan_rep_mode;
- cp.pscan_mode = ie->data.pscan_mode;
- cp.clock_offset = ie->data.clock_offset |
- cpu_to_le16(0x8000);
- }
-
- memcpy(conn->dev_class, ie->data.dev_class, 3);
- }
-
- cp.pkt_type = cpu_to_le16(conn->pkt_type);
- if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
- cp.role_switch = 0x01;
- else
- cp.role_switch = 0x00;
-
- hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
-}
-
int hci_disconnect(struct hci_conn *conn, __u8 reason)
{
BT_DBG("hcon %p", conn);
@@ -1175,9 +1116,6 @@ void hci_conn_del(struct hci_conn *conn)
}
}
- if (conn->amp_mgr)
- amp_mgr_put(conn->amp_mgr);
-
skb_queue_purge(&conn->data_q);
/* Remove the connection from the list and cleanup its remaining
@@ -1186,6 +1124,9 @@ void hci_conn_del(struct hci_conn *conn)
* rest of hci_conn_del.
*/
hci_conn_cleanup(conn);
+
+ /* Dequeue callbacks using connection pointer as data */
+ hci_cmd_sync_dequeue(hdev, NULL, conn, NULL);
}
struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, uint8_t src_type)
@@ -1320,53 +1261,6 @@ u8 hci_conn_set_handle(struct hci_conn *conn, u16 handle)
return 0;
}
-static void create_le_conn_complete(struct hci_dev *hdev, void *data, int err)
-{
- struct hci_conn *conn;
- u16 handle = PTR_UINT(data);
-
- conn = hci_conn_hash_lookup_handle(hdev, handle);
- if (!conn)
- return;
-
- bt_dev_dbg(hdev, "err %d", err);
-
- hci_dev_lock(hdev);
-
- if (!err) {
- hci_connect_le_scan_cleanup(conn, 0x00);
- goto done;
- }
-
- /* Check if connection is still pending */
- if (conn != hci_lookup_le_connect(hdev))
- goto done;
-
- /* Flush to make sure we send create conn cancel command if needed */
- flush_delayed_work(&conn->le_conn_timeout);
- hci_conn_failed(conn, bt_status(err));
-
-done:
- hci_dev_unlock(hdev);
-}
-
-static int hci_connect_le_sync(struct hci_dev *hdev, void *data)
-{
- struct hci_conn *conn;
- u16 handle = PTR_UINT(data);
-
- conn = hci_conn_hash_lookup_handle(hdev, handle);
- if (!conn)
- return 0;
-
- bt_dev_dbg(hdev, "conn %p", conn);
-
- clear_bit(HCI_CONN_SCANNING, &conn->flags);
- conn->state = BT_CONNECT;
-
- return hci_le_create_conn_sync(hdev, conn);
-}
-
struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
u8 dst_type, bool dst_resolved, u8 sec_level,
u16 conn_timeout, u8 role)
@@ -1433,9 +1327,7 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
conn->sec_level = BT_SECURITY_LOW;
conn->conn_timeout = conn_timeout;
- err = hci_cmd_sync_queue(hdev, hci_connect_le_sync,
- UINT_PTR(conn->handle),
- create_le_conn_complete);
+ err = hci_connect_le_sync(hdev, conn);
if (err) {
hci_conn_del(conn);
return ERR_PTR(err);
@@ -1669,7 +1561,7 @@ done:
struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
u8 sec_level, u8 auth_type,
- enum conn_reasons conn_reason)
+ enum conn_reasons conn_reason, u16 timeout)
{
struct hci_conn *acl;
@@ -1700,10 +1592,18 @@ struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
acl->conn_reason = conn_reason;
if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
+ int err;
+
acl->sec_level = BT_SECURITY_LOW;
acl->pending_sec_level = sec_level;
acl->auth_type = auth_type;
- hci_acl_create_connection(acl);
+ acl->conn_timeout = timeout;
+
+ err = hci_connect_acl_sync(hdev, acl);
+ if (err) {
+ hci_conn_del(acl);
+ return ERR_PTR(err);
+ }
}
return acl;
@@ -1738,14 +1638,15 @@ static struct hci_link *hci_conn_link(struct hci_conn *parent,
}
struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
- __u16 setting, struct bt_codec *codec)
+ __u16 setting, struct bt_codec *codec,
+ u16 timeout)
{
struct hci_conn *acl;
struct hci_conn *sco;
struct hci_link *link;
acl = hci_connect_acl(hdev, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING,
- CONN_REASON_SCO_CONNECT);
+ CONN_REASON_SCO_CONNECT, timeout);
if (IS_ERR(acl))
return acl;
@@ -2156,18 +2057,31 @@ static int create_pa_sync(struct hci_dev *hdev, void *data)
return hci_update_passive_scan_sync(hdev);
}
-int hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst, __u8 dst_type,
- __u8 sid, struct bt_iso_qos *qos)
+struct hci_conn *hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst,
+ __u8 dst_type, __u8 sid,
+ struct bt_iso_qos *qos)
{
struct hci_cp_le_pa_create_sync *cp;
+ struct hci_conn *conn;
+ int err;
if (hci_dev_test_and_set_flag(hdev, HCI_PA_SYNC))
- return -EBUSY;
+ return ERR_PTR(-EBUSY);
+
+ conn = hci_conn_add_unset(hdev, ISO_LINK, dst, HCI_ROLE_SLAVE);
+ if (!conn)
+ return ERR_PTR(-ENOMEM);
+
+ conn->iso_qos = *qos;
+ conn->state = BT_LISTEN;
+
+ hci_conn_hold(conn);
cp = kzalloc(sizeof(*cp), GFP_KERNEL);
if (!cp) {
hci_dev_clear_flag(hdev, HCI_PA_SYNC);
- return -ENOMEM;
+ hci_conn_drop(conn);
+ return ERR_PTR(-ENOMEM);
}
cp->options = qos->bcast.options;
@@ -2179,7 +2093,14 @@ int hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst, __u8 dst_type,
cp->sync_cte_type = qos->bcast.sync_cte_type;
/* Queue start pa_create_sync and scan */
- return hci_cmd_sync_queue(hdev, create_pa_sync, cp, create_pa_complete);
+ err = hci_cmd_sync_queue(hdev, create_pa_sync, cp, create_pa_complete);
+ if (err < 0) {
+ hci_conn_drop(conn);
+ kfree(cp);
+ return ERR_PTR(err);
+ }
+
+ return conn;
}
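hci_pa_create_sync() now hands callers the hci_conn itself, with failures encoded in the pointer via the kernel's ERR_PTR convention: errno values live in the top page of the address space, so a single pointer carries either the object or the error. A self-contained userspace model of the idiom:

    #include <errno.h>
    #include <stdlib.h>

    #define MAX_ERRNO 4095

    static inline void *err_ptr(long err)     { return (void *)err; }
    static inline long ptr_err(const void *p) { return (long)p; }
    static inline int is_err(const void *p)
    {
            return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
    }

    struct conn { int handle; };

    static struct conn *conn_create(int busy)
    {
            struct conn *c;

            if (busy)
                    return err_ptr(-EBUSY);   /* caller sees the errno */

            c = calloc(1, sizeof(*c));
            return c ? c : err_ptr(-ENOMEM);
    }

    static int open_conn(void)
    {
            struct conn *c = conn_create(0);

            if (is_err(c))
                    return (int)ptr_err(c);   /* negative errno */
            free(c);
            return 0;
    }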
int hci_le_big_create_sync(struct hci_dev *hdev, struct hci_conn *hcon,
@@ -2647,22 +2568,6 @@ void hci_conn_hash_flush(struct hci_dev *hdev)
}
}
-/* Check pending connect attempts */
-void hci_conn_check_pending(struct hci_dev *hdev)
-{
- struct hci_conn *conn;
-
- BT_DBG("hdev %s", hdev->name);
-
- hci_dev_lock(hdev);
-
- conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
- if (conn)
- hci_acl_create_connection(conn);
-
- hci_dev_unlock(hdev);
-}
-
static u32 get_link_mode(struct hci_conn *conn)
{
u32 link_mode = 0;
@@ -2978,12 +2883,10 @@ u32 hci_conn_get_phy(struct hci_conn *conn)
static int abort_conn_sync(struct hci_dev *hdev, void *data)
{
- struct hci_conn *conn;
- u16 handle = PTR_UINT(data);
+ struct hci_conn *conn = data;
- conn = hci_conn_hash_lookup_handle(hdev, handle);
- if (!conn)
- return 0;
+ if (!hci_conn_valid(hdev, conn))
+ return -ECANCELED;
return hci_abort_conn_sync(hdev, conn, conn->abort_reason);
}
@@ -3011,14 +2914,17 @@ int hci_abort_conn(struct hci_conn *conn, u8 reason)
*/
if (conn->state == BT_CONNECT && hdev->req_status == HCI_REQ_PEND) {
switch (hci_skb_event(hdev->sent_cmd)) {
+ case HCI_EV_CONN_COMPLETE:
case HCI_EV_LE_CONN_COMPLETE:
case HCI_EV_LE_ENHANCED_CONN_COMPLETE:
case HCI_EVT_LE_CIS_ESTABLISHED:
- hci_cmd_sync_cancel(hdev, -ECANCELED);
+ hci_cmd_sync_cancel(hdev, ECANCELED);
break;
}
+ /* Cancel connect attempt if still queued/pending */
+ } else if (!hci_cancel_connect_sync(hdev, conn)) {
+ return 0;
}
- return hci_cmd_sync_queue(hdev, abort_conn_sync, UINT_PTR(conn->handle),
- NULL);
+ return hci_cmd_sync_queue_once(hdev, abort_conn_sync, conn, NULL);
}
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 2821a42cefdc..1690ae57a09d 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -908,7 +908,7 @@ int hci_get_dev_info(void __user *arg)
else
flags = hdev->flags;
- strcpy(di.name, hdev->name);
+ strscpy(di.name, hdev->name, sizeof(di.name));
di.bdaddr = hdev->bdaddr;
di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
di.flags = flags;
@@ -940,20 +940,51 @@ int hci_get_dev_info(void __user *arg)
/* ---- Interface to HCI drivers ---- */
+static int hci_dev_do_poweroff(struct hci_dev *hdev)
+{
+ int err;
+
+ BT_DBG("%s %p", hdev->name, hdev);
+
+ hci_req_sync_lock(hdev);
+
+ err = hci_set_powered_sync(hdev, false);
+
+ hci_req_sync_unlock(hdev);
+
+ return err;
+}
+
static int hci_rfkill_set_block(void *data, bool blocked)
{
struct hci_dev *hdev = data;
+ int err;
BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
return -EBUSY;
+ if (blocked == hci_dev_test_flag(hdev, HCI_RFKILLED))
+ return 0;
+
if (blocked) {
hci_dev_set_flag(hdev, HCI_RFKILLED);
+
if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
- !hci_dev_test_flag(hdev, HCI_CONFIG))
- hci_dev_do_close(hdev);
+ !hci_dev_test_flag(hdev, HCI_CONFIG)) {
+ err = hci_dev_do_poweroff(hdev);
+ if (err) {
+ bt_dev_err(hdev, "Error when powering off device on rfkill (%d)",
+ err);
+
+ /* Make sure the device is still closed even if
+			 * anything during the power off sequence (e.g.
+ * disconnecting devices) failed.
+ */
+ hci_dev_do_close(hdev);
+ }
+ }
} else {
hci_dev_clear_flag(hdev, HCI_RFKILLED);
}
@@ -1491,11 +1522,12 @@ static void hci_cmd_timeout(struct work_struct *work)
struct hci_dev *hdev = container_of(work, struct hci_dev,
cmd_timer.work);
- if (hdev->sent_cmd) {
- struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
- u16 opcode = __le16_to_cpu(sent->opcode);
+ if (hdev->req_skb) {
+ u16 opcode = hci_skb_opcode(hdev->req_skb);
bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
+
+ hci_cmd_sync_cancel_sync(hdev, ETIMEDOUT);
} else {
bt_dev_err(hdev, "command tx timeout");
}
@@ -2608,10 +2640,11 @@ int hci_register_dev(struct hci_dev *hdev)
*/
switch (hdev->dev_type) {
case HCI_PRIMARY:
- id = ida_simple_get(&hci_index_ida, 0, HCI_MAX_ID, GFP_KERNEL);
+ id = ida_alloc_max(&hci_index_ida, HCI_MAX_ID - 1, GFP_KERNEL);
break;
case HCI_AMP:
- id = ida_simple_get(&hci_index_ida, 1, HCI_MAX_ID, GFP_KERNEL);
+ id = ida_alloc_range(&hci_index_ida, 1, HCI_MAX_ID - 1,
+ GFP_KERNEL);
break;
default:
return -EINVAL;
@@ -2710,7 +2743,7 @@ err_wqueue:
destroy_workqueue(hdev->workqueue);
destroy_workqueue(hdev->req_workqueue);
err:
- ida_simple_remove(&hci_index_ida, hdev->id);
+ ida_free(&hci_index_ida, hdev->id);
return error;
}
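The IDA conversions in this file are mechanical but easy to get wrong: ida_simple_get() takes an exclusive end, while ida_alloc_max()/ida_alloc_range() take an inclusive max, hence the "- 1". The mapping used here:

    ida_simple_get(ida, 0, HCI_MAX_ID, gfp)  ->  ida_alloc_max(ida, HCI_MAX_ID - 1, gfp)
    ida_simple_get(ida, 1, HCI_MAX_ID, gfp)  ->  ida_alloc_range(ida, 1, HCI_MAX_ID - 1, gfp)
    ida_simple_remove(ida, id)               ->  ida_free(ida, id)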
@@ -2793,8 +2826,9 @@ void hci_release_dev(struct hci_dev *hdev)
hci_dev_unlock(hdev);
ida_destroy(&hdev->unset_handle_ida);
- ida_simple_remove(&hci_index_ida, hdev->id);
+ ida_free(&hci_index_ida, hdev->id);
kfree_skb(hdev->sent_cmd);
+ kfree_skb(hdev->req_skb);
kfree_skb(hdev->recv_event);
kfree(hdev);
}
@@ -2826,6 +2860,23 @@ int hci_unregister_suspend_notifier(struct hci_dev *hdev)
return ret;
}
+/* Cancel ongoing command synchronously:
+ *
+ * - Cancel command timer
+ * - Reset command counter
+ * - Cancel command request
+ */
+static void hci_cancel_cmd_sync(struct hci_dev *hdev, int err)
+{
+ bt_dev_dbg(hdev, "err 0x%2.2x", err);
+
+ cancel_delayed_work_sync(&hdev->cmd_timer);
+ cancel_delayed_work_sync(&hdev->ncmd_timer);
+ atomic_set(&hdev->cmd_cnt, 1);
+
+ hci_cmd_sync_cancel_sync(hdev, -err);
+}
+
/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
@@ -2843,7 +2894,7 @@ int hci_suspend_dev(struct hci_dev *hdev)
return 0;
/* Cancel potentially blocking sync operation before suspend */
- __hci_cmd_sync_cancel(hdev, -EHOSTDOWN);
+ hci_cancel_cmd_sync(hdev, -EHOSTDOWN);
hci_req_sync_lock(hdev);
ret = hci_suspend_sync(hdev);
@@ -3107,21 +3158,33 @@ int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
EXPORT_SYMBOL(__hci_cmd_send);
/* Get data from the previously sent command */
-void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
+static void *hci_cmd_data(struct sk_buff *skb, __u16 opcode)
{
struct hci_command_hdr *hdr;
- if (!hdev->sent_cmd)
+ if (!skb || skb->len < HCI_COMMAND_HDR_SIZE)
return NULL;
- hdr = (void *) hdev->sent_cmd->data;
+ hdr = (void *)skb->data;
if (hdr->opcode != cpu_to_le16(opcode))
return NULL;
- BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
+ return skb->data + HCI_COMMAND_HDR_SIZE;
+}
- return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
+/* Get data from the previously sent command */
+void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
+{
+ void *data;
+
+ /* Check if opcode matches last sent command */
+ data = hci_cmd_data(hdev->sent_cmd, opcode);
+ if (!data)
+ /* Check if opcode matches last request */
+ data = hci_cmd_data(hdev->req_skb, opcode);
+
+ return data;
}
/* Get data from last received event */
@@ -4022,17 +4085,19 @@ void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
if (!status && !hci_req_is_complete(hdev))
return;
+ skb = hdev->req_skb;
+
/* If this was the last command in a request the complete
- * callback would be found in hdev->sent_cmd instead of the
+ * callback would be found in hdev->req_skb instead of the
* command queue (hdev->cmd_q).
*/
- if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
- *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
+ if (skb && bt_cb(skb)->hci.req_flags & HCI_REQ_SKB) {
+ *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
return;
}
- if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
- *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
+ if (skb && bt_cb(skb)->hci.req_complete) {
+ *req_complete = bt_cb(skb)->hci.req_complete;
return;
}
@@ -4128,6 +4193,36 @@ static void hci_rx_work(struct work_struct *work)
}
}
+static void hci_send_cmd_sync(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ int err;
+
+ bt_dev_dbg(hdev, "skb %p", skb);
+
+ kfree_skb(hdev->sent_cmd);
+
+ hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
+ if (!hdev->sent_cmd) {
+ skb_queue_head(&hdev->cmd_q, skb);
+ queue_work(hdev->workqueue, &hdev->cmd_work);
+ return;
+ }
+
+ err = hci_send_frame(hdev, skb);
+ if (err < 0) {
+ hci_cmd_sync_cancel_sync(hdev, err);
+ return;
+ }
+
+ if (hci_req_status_pend(hdev) &&
+ !hci_dev_test_and_set_flag(hdev, HCI_CMD_PENDING)) {
+ kfree_skb(hdev->req_skb);
+ hdev->req_skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
+ }
+
+ atomic_dec(&hdev->cmd_cnt);
+}
+
static void hci_cmd_work(struct work_struct *work)
{
struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
@@ -4142,30 +4237,15 @@ static void hci_cmd_work(struct work_struct *work)
if (!skb)
return;
- kfree_skb(hdev->sent_cmd);
-
- hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
- if (hdev->sent_cmd) {
- int res;
- if (hci_req_status_pend(hdev))
- hci_dev_set_flag(hdev, HCI_CMD_PENDING);
- atomic_dec(&hdev->cmd_cnt);
+ hci_send_cmd_sync(hdev, skb);
- res = hci_send_frame(hdev, skb);
- if (res < 0)
- __hci_cmd_sync_cancel(hdev, -res);
-
- rcu_read_lock();
- if (test_bit(HCI_RESET, &hdev->flags) ||
- hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
- cancel_delayed_work(&hdev->cmd_timer);
- else
- queue_delayed_work(hdev->workqueue, &hdev->cmd_timer,
- HCI_CMD_TIMEOUT);
- rcu_read_unlock();
- } else {
- skb_queue_head(&hdev->cmd_q, skb);
- queue_work(hdev->workqueue, &hdev->cmd_work);
- }
+ rcu_read_lock();
+ if (test_bit(HCI_RESET, &hdev->flags) ||
+ hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
+ cancel_delayed_work(&hdev->cmd_timer);
+ else
+ queue_delayed_work(hdev->workqueue, &hdev->cmd_timer,
+ HCI_CMD_TIMEOUT);
+ rcu_read_unlock();
}
}
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 2a5f5a7d2412..4ae224824012 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -36,8 +36,6 @@
#include "hci_request.h"
#include "hci_debugfs.h"
#include "hci_codec.h"
-#include "a2mp.h"
-#include "amp.h"
#include "smp.h"
#include "msft.h"
#include "eir.h"
@@ -95,11 +93,11 @@ static u8 hci_cc_inquiry_cancel(struct hci_dev *hdev, void *data,
/* It is possible that we receive Inquiry Complete event right
* before we receive Inquiry Cancel Command Complete event, in
* which case the latter event should have status of Command
- * Disallowed (0x0c). This should not be treated as error, since
+	 * Disallowed. This should not be treated as an error, since
* we actually achieve what Inquiry Cancel wants to achieve,
* which is to end the last Inquiry session.
*/
- if (rp->status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) {
+ if (rp->status == HCI_ERROR_COMMAND_DISALLOWED && !test_bit(HCI_INQUIRY, &hdev->flags)) {
bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
rp->status = 0x00;
}
@@ -120,8 +118,6 @@ static u8 hci_cc_inquiry_cancel(struct hci_dev *hdev, void *data,
hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
hci_dev_unlock(hdev);
- hci_conn_check_pending(hdev);
-
return rp->status;
}
@@ -152,8 +148,6 @@ static u8 hci_cc_exit_periodic_inq(struct hci_dev *hdev, void *data,
hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);
- hci_conn_check_pending(hdev);
-
return rp->status;
}
@@ -2314,10 +2308,8 @@ static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
{
bt_dev_dbg(hdev, "status 0x%2.2x", status);
- if (status) {
- hci_conn_check_pending(hdev);
+ if (status)
return;
- }
if (hci_sent_cmd_data(hdev, HCI_OP_INQUIRY))
set_bit(HCI_INQUIRY, &hdev->flags);
@@ -2342,12 +2334,9 @@ static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
if (status) {
if (conn && conn->state == BT_CONNECT) {
- if (status != 0x0c || conn->attempt > 2) {
- conn->state = BT_CLOSED;
- hci_connect_cfm(conn, status);
- hci_conn_del(conn);
- } else
- conn->state = BT_CONNECT2;
+ conn->state = BT_CLOSED;
+ hci_connect_cfm(conn, status);
+ hci_conn_del(conn);
}
} else {
if (!conn) {
@@ -2526,9 +2515,7 @@ static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
* Only those in BT_CONFIG or BT_CONNECTED states can be
* considered connected.
*/
- if (conn &&
- (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
- !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
+ if (conn && (conn->state == BT_CONFIG || conn->state == BT_CONNECTED))
mgmt_device_connected(hdev, conn, name, name_len);
if (discov->state == DISCOVERY_STOPPED)
@@ -3039,8 +3026,6 @@ static void hci_inquiry_complete_evt(struct hci_dev *hdev, void *data,
bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
- hci_conn_check_pending(hdev);
-
if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
return;
@@ -3262,8 +3247,6 @@ done:
unlock:
hci_dev_unlock(hdev);
-
- hci_conn_check_pending(hdev);
}
static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
@@ -3556,8 +3539,6 @@ static void hci_remote_name_evt(struct hci_dev *hdev, void *data,
bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
- hci_conn_check_pending(hdev);
-
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
@@ -3660,7 +3641,8 @@ static void hci_encrypt_change_evt(struct hci_dev *hdev, void *data,
* controller really supports it. If it doesn't, assume
* the default size (16).
*/
- if (!(hdev->commands[20] & 0x10)) {
+ if (!(hdev->commands[20] & 0x10) ||
+ test_bit(HCI_QUIRK_BROKEN_READ_ENC_KEY_SIZE, &hdev->quirks)) {
conn->enc_key_size = HCI_LINK_KEY_SIZE;
goto notify;
}
@@ -3762,8 +3744,9 @@ static void hci_remote_features_evt(struct hci_dev *hdev, void *data,
bacpy(&cp.bdaddr, &conn->dst);
cp.pscan_rep_mode = 0x02;
hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
- } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
+ } else {
mgmt_device_connected(hdev, conn, NULL, 0);
+ }
if (!hci_outgoing_auth_needed(hdev, conn)) {
conn->state = BT_CONNECTED;
@@ -3936,6 +3919,11 @@ static u8 hci_cc_le_setup_iso_path(struct hci_dev *hdev, void *data,
* last.
*/
hci_connect_cfm(conn, rp->status);
+
+ /* Notify device connected in case it is a BIG Sync */
+ if (!rp->status && test_bit(HCI_CONN_BIG_SYNC, &conn->flags))
+ mgmt_device_connected(hdev, conn, NULL, 0);
+
break;
}
@@ -4381,7 +4369,7 @@ static void hci_cmd_status_evt(struct hci_dev *hdev, void *data,
* (since for this kind of commands there will not be a command
* complete event).
*/
- if (ev->status || (hdev->sent_cmd && !hci_skb_event(hdev->sent_cmd))) {
+ if (ev->status || (hdev->req_skb && !hci_skb_event(hdev->req_skb))) {
hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
req_complete_skb);
if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
@@ -5010,8 +4998,9 @@ static void hci_remote_ext_features_evt(struct hci_dev *hdev, void *data,
bacpy(&cp.bdaddr, &conn->dst);
cp.pscan_rep_mode = 0x02;
hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
- } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
+ } else {
mgmt_device_connected(hdev, conn, NULL, 0);
+ }
if (!hci_outgoing_auth_needed(hdev, conn)) {
conn->state = BT_CONNECTED;
@@ -5675,150 +5664,6 @@ unlock:
hci_dev_unlock(hdev);
}
-#if IS_ENABLED(CONFIG_BT_HS)
-static void hci_chan_selected_evt(struct hci_dev *hdev, void *data,
- struct sk_buff *skb)
-{
- struct hci_ev_channel_selected *ev = data;
- struct hci_conn *hcon;
-
- bt_dev_dbg(hdev, "handle 0x%2.2x", ev->phy_handle);
-
- hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
- if (!hcon)
- return;
-
- amp_read_loc_assoc_final_data(hdev, hcon);
-}
-
-static void hci_phy_link_complete_evt(struct hci_dev *hdev, void *data,
- struct sk_buff *skb)
-{
- struct hci_ev_phy_link_complete *ev = data;
- struct hci_conn *hcon, *bredr_hcon;
-
- bt_dev_dbg(hdev, "handle 0x%2.2x status 0x%2.2x", ev->phy_handle,
- ev->status);
-
- hci_dev_lock(hdev);
-
- hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
- if (!hcon)
- goto unlock;
-
- if (!hcon->amp_mgr)
- goto unlock;
-
- if (ev->status) {
- hci_conn_del(hcon);
- goto unlock;
- }
-
- bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
-
- hcon->state = BT_CONNECTED;
- bacpy(&hcon->dst, &bredr_hcon->dst);
-
- hci_conn_hold(hcon);
- hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
- hci_conn_drop(hcon);
-
- hci_debugfs_create_conn(hcon);
- hci_conn_add_sysfs(hcon);
-
- amp_physical_cfm(bredr_hcon, hcon);
-
-unlock:
- hci_dev_unlock(hdev);
-}
-
-static void hci_loglink_complete_evt(struct hci_dev *hdev, void *data,
- struct sk_buff *skb)
-{
- struct hci_ev_logical_link_complete *ev = data;
- struct hci_conn *hcon;
- struct hci_chan *hchan;
- struct amp_mgr *mgr;
-
- bt_dev_dbg(hdev, "log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
- le16_to_cpu(ev->handle), ev->phy_handle, ev->status);
-
- hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
- if (!hcon)
- return;
-
- /* Create AMP hchan */
- hchan = hci_chan_create(hcon);
- if (!hchan)
- return;
-
- hchan->handle = le16_to_cpu(ev->handle);
- hchan->amp = true;
-
- BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
-
- mgr = hcon->amp_mgr;
- if (mgr && mgr->bredr_chan) {
- struct l2cap_chan *bredr_chan = mgr->bredr_chan;
-
- l2cap_chan_lock(bredr_chan);
-
- bredr_chan->conn->mtu = hdev->block_mtu;
- l2cap_logical_cfm(bredr_chan, hchan, 0);
- hci_conn_hold(hcon);
-
- l2cap_chan_unlock(bredr_chan);
- }
-}
-
-static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev, void *data,
- struct sk_buff *skb)
-{
- struct hci_ev_disconn_logical_link_complete *ev = data;
- struct hci_chan *hchan;
-
- bt_dev_dbg(hdev, "handle 0x%4.4x status 0x%2.2x",
- le16_to_cpu(ev->handle), ev->status);
-
- if (ev->status)
- return;
-
- hci_dev_lock(hdev);
-
- hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
- if (!hchan || !hchan->amp)
- goto unlock;
-
- amp_destroy_logical_link(hchan, ev->reason);
-
-unlock:
- hci_dev_unlock(hdev);
-}
-
-static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev, void *data,
- struct sk_buff *skb)
-{
- struct hci_ev_disconn_phy_link_complete *ev = data;
- struct hci_conn *hcon;
-
- bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
-
- if (ev->status)
- return;
-
- hci_dev_lock(hdev);
-
- hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
- if (hcon && hcon->type == AMP_LINK) {
- hcon->state = BT_CLOSED;
- hci_disconn_cfm(hcon, ev->reason);
- hci_conn_del(hcon);
- }
-
- hci_dev_unlock(hdev);
-}
-#endif
-
static void le_conn_update_addr(struct hci_conn *conn, bdaddr_t *bdaddr,
u8 bdaddr_type, bdaddr_t *local_rpa)
{
@@ -5984,8 +5829,7 @@ static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
goto unlock;
}
- if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
- mgmt_device_connected(hdev, conn, NULL, 0);
+ mgmt_device_connected(hdev, conn, NULL, 0);
conn->sec_level = BT_SECURITY_LOW;
conn->state = BT_CONFIG;
@@ -6684,7 +6528,7 @@ static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev, void *data,
* transition into connected state and mark it as
* successful.
*/
- if (!conn->out && ev->status == 0x1a &&
+ if (!conn->out && ev->status == HCI_ERROR_UNSUPPORTED_REMOTE_FEATURE &&
(hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES))
status = 0x00;
else
@@ -7214,6 +7058,9 @@ static void hci_le_big_info_adv_report_evt(struct hci_dev *hdev, void *data,
/* Notify iso layer */
hci_connect_cfm(pa_sync, 0x00);
+ /* Notify MGMT layer */
+ mgmt_device_connected(hdev, pa_sync, NULL, 0);
+
unlock:
hci_dev_unlock(hdev);
}
@@ -7324,10 +7171,10 @@ static void hci_le_meta_evt(struct hci_dev *hdev, void *data,
bt_dev_dbg(hdev, "subevent 0x%2.2x", ev->subevent);
/* Only match event if command OGF is for LE */
- if (hdev->sent_cmd &&
- hci_opcode_ogf(hci_skb_opcode(hdev->sent_cmd)) == 0x08 &&
- hci_skb_event(hdev->sent_cmd) == ev->subevent) {
- *opcode = hci_skb_opcode(hdev->sent_cmd);
+ if (hdev->req_skb &&
+ hci_opcode_ogf(hci_skb_opcode(hdev->req_skb)) == 0x08 &&
+ hci_skb_event(hdev->req_skb) == ev->subevent) {
+ *opcode = hci_skb_opcode(hdev->req_skb);
hci_req_cmd_complete(hdev, *opcode, 0x00, req_complete,
req_complete_skb);
}
@@ -7626,25 +7473,6 @@ static const struct hci_ev {
/* [0x3e = HCI_EV_LE_META] */
HCI_EV_REQ_VL(HCI_EV_LE_META, hci_le_meta_evt,
sizeof(struct hci_ev_le_meta), HCI_MAX_EVENT_SIZE),
-#if IS_ENABLED(CONFIG_BT_HS)
- /* [0x40 = HCI_EV_PHY_LINK_COMPLETE] */
- HCI_EV(HCI_EV_PHY_LINK_COMPLETE, hci_phy_link_complete_evt,
- sizeof(struct hci_ev_phy_link_complete)),
- /* [0x41 = HCI_EV_CHANNEL_SELECTED] */
- HCI_EV(HCI_EV_CHANNEL_SELECTED, hci_chan_selected_evt,
- sizeof(struct hci_ev_channel_selected)),
- /* [0x42 = HCI_EV_DISCONN_PHY_LINK_COMPLETE] */
- HCI_EV(HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE,
- hci_disconn_loglink_complete_evt,
- sizeof(struct hci_ev_disconn_logical_link_complete)),
- /* [0x45 = HCI_EV_LOGICAL_LINK_COMPLETE] */
- HCI_EV(HCI_EV_LOGICAL_LINK_COMPLETE, hci_loglink_complete_evt,
- sizeof(struct hci_ev_logical_link_complete)),
- /* [0x46 = HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE] */
- HCI_EV(HCI_EV_DISCONN_PHY_LINK_COMPLETE,
- hci_disconn_phylink_complete_evt,
- sizeof(struct hci_ev_disconn_phy_link_complete)),
-#endif
/* [0x48 = HCI_EV_NUM_COMP_BLOCKS] */
HCI_EV(HCI_EV_NUM_COMP_BLOCKS, hci_num_comp_blocks_evt,
sizeof(struct hci_ev_num_comp_blocks)),
@@ -7714,10 +7542,10 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
}
/* Only match event if command OGF is not for LE */
- if (hdev->sent_cmd &&
- hci_opcode_ogf(hci_skb_opcode(hdev->sent_cmd)) != 0x08 &&
- hci_skb_event(hdev->sent_cmd) == event) {
- hci_req_cmd_complete(hdev, hci_skb_opcode(hdev->sent_cmd),
+ if (hdev->req_skb &&
+ hci_opcode_ogf(hci_skb_opcode(hdev->req_skb)) != 0x08 &&
+ hci_skb_event(hdev->req_skb) == event) {
+ hci_req_cmd_complete(hdev, hci_skb_opcode(hdev->req_skb),
status, &req_complete, &req_complete_skb);
req_evt = event;
}
diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
index 6e023b0104b0..00e02138003e 100644
--- a/net/bluetooth/hci_request.c
+++ b/net/bluetooth/hci_request.c
@@ -895,7 +895,7 @@ void hci_request_setup(struct hci_dev *hdev)
void hci_request_cancel_all(struct hci_dev *hdev)
{
- __hci_cmd_sync_cancel(hdev, ENODEV);
+ hci_cmd_sync_cancel_sync(hdev, ENODEV);
cancel_interleave_scan(hdev);
}
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 3e7cd330d731..4ee1b976678b 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -101,7 +101,7 @@ static bool hci_sock_gen_cookie(struct sock *sk)
int id = hci_pi(sk)->cookie;
if (!id) {
- id = ida_simple_get(&sock_cookie_ida, 1, 0, GFP_KERNEL);
+ id = ida_alloc_min(&sock_cookie_ida, 1, GFP_KERNEL);
if (id < 0)
id = 0xffffffff;
@@ -119,7 +119,7 @@ static void hci_sock_free_cookie(struct sock *sk)
if (id) {
hci_pi(sk)->cookie = 0xffffffff;
- ida_simple_remove(&sock_cookie_ida, id);
+ ida_free(&sock_cookie_ida, id);
}
}
diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
index 5716345a26df..f6b662369322 100644
--- a/net/bluetooth/hci_sync.c
+++ b/net/bluetooth/hci_sync.c
@@ -32,6 +32,10 @@ static void hci_cmd_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
hdev->req_result = result;
hdev->req_status = HCI_REQ_DONE;
+ /* Free the request command so it is not used as response */
+ kfree_skb(hdev->req_skb);
+ hdev->req_skb = NULL;
+
if (skb) {
struct sock *sk = hci_skb_sk(skb);
@@ -39,7 +43,7 @@ static void hci_cmd_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
if (sk)
sock_put(sk);
- hdev->req_skb = skb_get(skb);
+ hdev->req_rsp = skb_get(skb);
}
wake_up_interruptible(&hdev->req_wait_q);
@@ -187,8 +191,8 @@ struct sk_buff *__hci_cmd_sync_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
hdev->req_status = 0;
hdev->req_result = 0;
- skb = hdev->req_skb;
- hdev->req_skb = NULL;
+ skb = hdev->req_rsp;
+ hdev->req_rsp = NULL;
bt_dev_dbg(hdev, "end: err %d", err);
@@ -566,6 +570,17 @@ void hci_cmd_sync_init(struct hci_dev *hdev)
INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
}
+static void _hci_cmd_sync_cancel_entry(struct hci_dev *hdev,
+ struct hci_cmd_sync_work_entry *entry,
+ int err)
+{
+ if (entry->destroy)
+ entry->destroy(hdev, entry->data, err);
+
+ list_del(&entry->list);
+ kfree(entry);
+}
+
void hci_cmd_sync_clear(struct hci_dev *hdev)
{
struct hci_cmd_sync_work_entry *entry, *tmp;
@@ -574,17 +589,12 @@ void hci_cmd_sync_clear(struct hci_dev *hdev)
cancel_work_sync(&hdev->reenable_adv_work);
mutex_lock(&hdev->cmd_sync_work_lock);
- list_for_each_entry_safe(entry, tmp, &hdev->cmd_sync_work_list, list) {
- if (entry->destroy)
- entry->destroy(hdev, entry->data, -ECANCELED);
-
- list_del(&entry->list);
- kfree(entry);
- }
+ list_for_each_entry_safe(entry, tmp, &hdev->cmd_sync_work_list, list)
+ _hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED);
mutex_unlock(&hdev->cmd_sync_work_lock);
}
-void __hci_cmd_sync_cancel(struct hci_dev *hdev, int err)
+void hci_cmd_sync_cancel(struct hci_dev *hdev, int err)
{
bt_dev_dbg(hdev, "err 0x%2.2x", err);
@@ -592,15 +602,17 @@ void __hci_cmd_sync_cancel(struct hci_dev *hdev, int err)
hdev->req_result = err;
hdev->req_status = HCI_REQ_CANCELED;
- cancel_delayed_work_sync(&hdev->cmd_timer);
- cancel_delayed_work_sync(&hdev->ncmd_timer);
- atomic_set(&hdev->cmd_cnt, 1);
-
- wake_up_interruptible(&hdev->req_wait_q);
+ queue_work(hdev->workqueue, &hdev->cmd_sync_cancel_work);
}
}
+EXPORT_SYMBOL(hci_cmd_sync_cancel);
-void hci_cmd_sync_cancel(struct hci_dev *hdev, int err)
+/* Cancel ongoing command request synchronously:
+ *
+ * - Set result and mark status to HCI_REQ_CANCELED
+ * - Wake up command sync thread
+ */
+void hci_cmd_sync_cancel_sync(struct hci_dev *hdev, int err)
{
bt_dev_dbg(hdev, "err 0x%2.2x", err);
@@ -608,10 +620,10 @@ void hci_cmd_sync_cancel(struct hci_dev *hdev, int err)
hdev->req_result = err;
hdev->req_status = HCI_REQ_CANCELED;
- queue_work(hdev->workqueue, &hdev->cmd_sync_cancel_work);
+ wake_up_interruptible(&hdev->req_wait_q);
}
}
-EXPORT_SYMBOL(hci_cmd_sync_cancel);
+EXPORT_SYMBOL(hci_cmd_sync_cancel_sync);
/* Submit HCI command to be run in as cmd_sync_work:
*
@@ -667,6 +679,115 @@ int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
}
EXPORT_SYMBOL(hci_cmd_sync_queue);
+static struct hci_cmd_sync_work_entry *
+_hci_cmd_sync_lookup_entry(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
+ void *data, hci_cmd_sync_work_destroy_t destroy)
+{
+ struct hci_cmd_sync_work_entry *entry, *tmp;
+
+ list_for_each_entry_safe(entry, tmp, &hdev->cmd_sync_work_list, list) {
+ if (func && entry->func != func)
+ continue;
+
+ if (data && entry->data != data)
+ continue;
+
+ if (destroy && entry->destroy != destroy)
+ continue;
+
+ return entry;
+ }
+
+ return NULL;
+}
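Each NULL criterion in _hci_cmd_sync_lookup_entry() acts as a wildcard: an entry must match every non-NULL one of func, data and destroy. A trimmed standalone model of the filter (destroy omitted for brevity):

    #include <stdbool.h>
    #include <stddef.h>

    struct entry {
            void (*func)(void *);
            void *data;
    };

    /* A NULL criterion matches anything, as in the lookup above */
    static bool entry_matches(const struct entry *e,
                              void (*func)(void *), void *data)
    {
            if (func && e->func != func)
                    return false;
            if (data && e->data != data)
                    return false;
            return true;
    }

So a caller can select by callback alone, by data alone, or take the first queued entry outright.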
+
+/* Queue HCI command entry once:
+ *
+ * - Look up whether an entry already exists; only if it doesn't, create a
+ *   new entry and queue it.
+ */
+int hci_cmd_sync_queue_once(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
+ void *data, hci_cmd_sync_work_destroy_t destroy)
+{
+ if (hci_cmd_sync_lookup_entry(hdev, func, data, destroy))
+ return 0;
+
+ return hci_cmd_sync_queue(hdev, func, data, destroy);
+}
+EXPORT_SYMBOL(hci_cmd_sync_queue_once);
+
+/* Lookup HCI command entry:
+ *
+ * - Return the first entry that matches by function callback, data, or
+ *   destroy callback.
+ */
+struct hci_cmd_sync_work_entry *
+hci_cmd_sync_lookup_entry(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
+ void *data, hci_cmd_sync_work_destroy_t destroy)
+{
+ struct hci_cmd_sync_work_entry *entry;
+
+ mutex_lock(&hdev->cmd_sync_work_lock);
+ entry = _hci_cmd_sync_lookup_entry(hdev, func, data, destroy);
+ mutex_unlock(&hdev->cmd_sync_work_lock);
+
+ return entry;
+}
+EXPORT_SYMBOL(hci_cmd_sync_lookup_entry);
+
+/* Cancel HCI command entry */
+void hci_cmd_sync_cancel_entry(struct hci_dev *hdev,
+ struct hci_cmd_sync_work_entry *entry)
+{
+ mutex_lock(&hdev->cmd_sync_work_lock);
+ _hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED);
+ mutex_unlock(&hdev->cmd_sync_work_lock);
+}
+EXPORT_SYMBOL(hci_cmd_sync_cancel_entry);
+
+/* Dequeue one HCI command entry:
+ *
+ * - Look up and cancel the first entry that matches.
+ */
+bool hci_cmd_sync_dequeue_once(struct hci_dev *hdev,
+ hci_cmd_sync_work_func_t func,
+ void *data, hci_cmd_sync_work_destroy_t destroy)
+{
+ struct hci_cmd_sync_work_entry *entry;
+
+ entry = hci_cmd_sync_lookup_entry(hdev, func, data, destroy);
+ if (!entry)
+ return false;
+
+ hci_cmd_sync_cancel_entry(hdev, entry);
+
+ return true;
+}
+EXPORT_SYMBOL(hci_cmd_sync_dequeue_once);
+
+/* Dequeue HCI command entry:
+ *
+ * - Look up and cancel any entry that matches by function callback, data, or
+ *   destroy callback.
+ */
+bool hci_cmd_sync_dequeue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
+ void *data, hci_cmd_sync_work_destroy_t destroy)
+{
+ struct hci_cmd_sync_work_entry *entry;
+ bool ret = false;
+
+ mutex_lock(&hdev->cmd_sync_work_lock);
+ while ((entry = _hci_cmd_sync_lookup_entry(hdev, func, data,
+ destroy))) {
+ _hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED);
+ ret = true;
+ }
+ mutex_unlock(&hdev->cmd_sync_work_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(hci_cmd_sync_dequeue);
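The dequeue-all form is what the hci_conn.c hunk earlier in this patch relies on: hci_conn_del() flushes every queued entry keyed by the dying connection, whatever callback it was queued with:

    /* from hci_conn_del(): any func/destroy, but only this conn's data */
    hci_cmd_sync_dequeue(hdev, NULL, conn, NULL);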
+
int hci_update_eir_sync(struct hci_dev *hdev)
{
struct hci_cp_write_eir cp;
@@ -2445,6 +2566,16 @@ static struct conn_params *conn_params_copy(struct list_head *list, size_t *n)
return p;
}
+/* Clear LE Accept List */
+static int hci_le_clear_accept_list_sync(struct hci_dev *hdev)
+{
+ if (!(hdev->commands[26] & 0x80))
+ return 0;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_LE_CLEAR_ACCEPT_LIST, 0, NULL,
+ HCI_CMD_TIMEOUT);
+}
+
/* Device must not be scanning when updating the accept list.
*
* Update is done using the following sequence:
@@ -2493,6 +2624,31 @@ static u8 hci_update_accept_list_sync(struct hci_dev *hdev)
goto done;
}
+ /* Force address filtering if PA Sync is in progress */
+ if (hci_dev_test_flag(hdev, HCI_PA_SYNC)) {
+ struct hci_cp_le_pa_create_sync *sent;
+
+ sent = hci_sent_cmd_data(hdev, HCI_OP_LE_PA_CREATE_SYNC);
+ if (sent) {
+ struct conn_params pa;
+
+ memset(&pa, 0, sizeof(pa));
+
+ bacpy(&pa.addr, &sent->addr);
+ pa.addr_type = sent->addr_type;
+
+ /* Clear first since there could be addresses left
+ * behind.
+ */
+ hci_le_clear_accept_list_sync(hdev);
+
+ num_entries = 1;
+ err = hci_le_add_accept_list_sync(hdev, &pa,
+ &num_entries);
+ goto done;
+ }
+ }
+
/* Go through the current accept list programmed into the
* controller one by one and check if that address is connected or is
* still in the list of pending connections or list of devices to
@@ -2602,6 +2758,14 @@ done:
return filter_policy;
}
+static void hci_le_scan_phy_params(struct hci_cp_le_scan_phy_params *cp,
+ u8 type, u16 interval, u16 window)
+{
+ cp->type = type;
+ cp->interval = cpu_to_le16(interval);
+ cp->window = cpu_to_le16(window);
+}
+
static int hci_le_set_ext_scan_param_sync(struct hci_dev *hdev, u8 type,
u16 interval, u16 window,
u8 own_addr_type, u8 filter_policy)
@@ -2609,7 +2773,7 @@ static int hci_le_set_ext_scan_param_sync(struct hci_dev *hdev, u8 type,
struct hci_cp_le_set_ext_scan_params *cp;
struct hci_cp_le_scan_phy_params *phy;
u8 data[sizeof(*cp) + sizeof(*phy) * 2];
- u8 num_phy = 0;
+ u8 num_phy = 0x00;
cp = (void *)data;
phy = (void *)cp->data;
@@ -2619,28 +2783,64 @@ static int hci_le_set_ext_scan_param_sync(struct hci_dev *hdev, u8 type,
cp->own_addr_type = own_addr_type;
cp->filter_policy = filter_policy;
+	/* Check if PA Sync is in progress, then select the PHY based on the
+ * hci_conn.iso_qos.
+ */
+ if (hci_dev_test_flag(hdev, HCI_PA_SYNC)) {
+ struct hci_cp_le_add_to_accept_list *sent;
+
+ sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST);
+ if (sent) {
+ struct hci_conn *conn;
+
+ conn = hci_conn_hash_lookup_ba(hdev, ISO_LINK,
+ &sent->bdaddr);
+ if (conn) {
+ struct bt_iso_qos *qos = &conn->iso_qos;
+
+ if (qos->bcast.in.phy & BT_ISO_PHY_1M ||
+ qos->bcast.in.phy & BT_ISO_PHY_2M) {
+ cp->scanning_phys |= LE_SCAN_PHY_1M;
+ hci_le_scan_phy_params(phy, type,
+ interval,
+ window);
+ num_phy++;
+ phy++;
+ }
+
+ if (qos->bcast.in.phy & BT_ISO_PHY_CODED) {
+ cp->scanning_phys |= LE_SCAN_PHY_CODED;
+ hci_le_scan_phy_params(phy, type,
+ interval,
+ window);
+ num_phy++;
+ phy++;
+ }
+
+ if (num_phy)
+ goto done;
+ }
+ }
+ }
+
if (scan_1m(hdev) || scan_2m(hdev)) {
cp->scanning_phys |= LE_SCAN_PHY_1M;
-
- phy->type = type;
- phy->interval = cpu_to_le16(interval);
- phy->window = cpu_to_le16(window);
-
+ hci_le_scan_phy_params(phy, type, interval, window);
num_phy++;
phy++;
}
if (scan_coded(hdev)) {
cp->scanning_phys |= LE_SCAN_PHY_CODED;
-
- phy->type = type;
- phy->interval = cpu_to_le16(interval);
- phy->window = cpu_to_le16(window);
-
+ hci_le_scan_phy_params(phy, type, interval, window);
num_phy++;
phy++;
}
+done:
+ if (!num_phy)
+ return -EINVAL;
+
return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
sizeof(*cp) + sizeof(*phy) * num_phy,
data, HCI_CMD_TIMEOUT);
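HCI_OP_LE_SET_EXT_SCAN_PARAMS is a variable-length command: a fixed header followed by one PHY parameter block per bit set in scanning_phys, so the wire size is sizeof(*cp) + num_phy * sizeof(*phy), and an empty PHY set is now rejected with -EINVAL. A sketch of that layout (field set trimmed, names illustrative):

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    struct scan_hdr {
            uint8_t own_addr_type;
            uint8_t filter_policy;
            uint8_t phys;   /* bitmask: one parameter block per set bit */
    } __attribute__((packed));

    struct phy_blk {
            uint8_t type;
            uint16_t interval;
            uint16_t window;
    } __attribute__((packed));

    /* Returns the number of bytes to put on the wire */
    static size_t build_scan_cmd(uint8_t *buf, uint8_t phys_mask,
                                 const struct phy_blk *tmpl)
    {
            struct scan_hdr *hdr = (struct scan_hdr *)buf;
            struct phy_blk *blk = (struct phy_blk *)(buf + sizeof(*hdr));
            size_t num_phy = 0;

            memset(hdr, 0, sizeof(*hdr));
            hdr->phys = phys_mask;

            for (unsigned int bit = 0; bit < 8; bit++)
                    if (phys_mask & (1u << bit))
                            blk[num_phy++] = *tmpl; /* same params per PHY */

            return sizeof(*hdr) + num_phy * sizeof(*blk);
    }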
@@ -2879,7 +3079,8 @@ int hci_update_passive_scan(struct hci_dev *hdev)
hci_dev_test_flag(hdev, HCI_UNREGISTER))
return 0;
- return hci_cmd_sync_queue(hdev, update_passive_scan_sync, NULL, NULL);
+ return hci_cmd_sync_queue_once(hdev, update_passive_scan_sync, NULL,
+ NULL);
}
int hci_write_sc_support_sync(struct hci_dev *hdev, u8 val)
@@ -4098,16 +4299,6 @@ static int hci_le_read_accept_list_size_sync(struct hci_dev *hdev)
0, NULL, HCI_CMD_TIMEOUT);
}
-/* Clear LE Accept List */
-static int hci_le_clear_accept_list_sync(struct hci_dev *hdev)
-{
- if (!(hdev->commands[26] & 0x80))
- return 0;
-
- return __hci_cmd_sync_status(hdev, HCI_OP_LE_CLEAR_ACCEPT_LIST, 0, NULL,
- HCI_CMD_TIMEOUT);
-}
-
/* Read LE Resolving List Size */
static int hci_le_read_resolv_list_size_sync(struct hci_dev *hdev)
{
@@ -4834,6 +5025,11 @@ int hci_dev_open_sync(struct hci_dev *hdev)
hdev->sent_cmd = NULL;
}
+ if (hdev->req_skb) {
+ kfree_skb(hdev->req_skb);
+ hdev->req_skb = NULL;
+ }
+
clear_bit(HCI_RUNNING, &hdev->flags);
hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
@@ -4994,6 +5190,12 @@ int hci_dev_close_sync(struct hci_dev *hdev)
hdev->sent_cmd = NULL;
}
+ /* Drop last request */
+ if (hdev->req_skb) {
+ kfree_skb(hdev->req_skb);
+ hdev->req_skb = NULL;
+ }
+
clear_bit(HCI_RUNNING, &hdev->flags);
hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
@@ -5403,27 +5605,33 @@ static int hci_power_off_sync(struct hci_dev *hdev)
if (!test_bit(HCI_UP, &hdev->flags))
return 0;
+ hci_dev_set_flag(hdev, HCI_POWERING_DOWN);
+
if (test_bit(HCI_ISCAN, &hdev->flags) ||
test_bit(HCI_PSCAN, &hdev->flags)) {
err = hci_write_scan_enable_sync(hdev, 0x00);
if (err)
- return err;
+ goto out;
}
err = hci_clear_adv_sync(hdev, NULL, false);
if (err)
- return err;
+ goto out;
err = hci_stop_discovery_sync(hdev);
if (err)
- return err;
+ goto out;
/* Terminated due to Power Off */
err = hci_disconnect_all_sync(hdev, HCI_ERROR_REMOTE_POWER_OFF);
if (err)
- return err;
+ goto out;
+
+ err = hci_dev_close_sync(hdev);
- return hci_dev_close_sync(hdev);
+out:
+ hci_dev_clear_flag(hdev, HCI_POWERING_DOWN);
+ return err;
}
int hci_set_powered_sync(struct hci_dev *hdev, u8 val)
@@ -6161,12 +6369,21 @@ static int hci_le_ext_create_conn_sync(struct hci_dev *hdev,
conn->conn_timeout, NULL);
}
-int hci_le_create_conn_sync(struct hci_dev *hdev, struct hci_conn *conn)
+static int hci_le_create_conn_sync(struct hci_dev *hdev, void *data)
{
struct hci_cp_le_create_conn cp;
struct hci_conn_params *params;
u8 own_addr_type;
int err;
+ struct hci_conn *conn = data;
+
+ if (!hci_conn_valid(hdev, conn))
+ return -ECANCELED;
+
+ bt_dev_dbg(hdev, "conn %p", conn);
+
+ clear_bit(HCI_CONN_SCANNING, &conn->flags);
+ conn->state = BT_CONNECT;
/* If requested to connect as peripheral use directed advertising */
if (conn->role == HCI_ROLE_SLAVE) {
@@ -6484,3 +6701,125 @@ int hci_update_adv_data(struct hci_dev *hdev, u8 instance)
return hci_cmd_sync_queue(hdev, _update_adv_data_sync,
UINT_PTR(instance), NULL);
}
+
+static int hci_acl_create_conn_sync(struct hci_dev *hdev, void *data)
+{
+ struct hci_conn *conn = data;
+ struct inquiry_entry *ie;
+ struct hci_cp_create_conn cp;
+ int err;
+
+ if (!hci_conn_valid(hdev, conn))
+ return -ECANCELED;
+
+ /* Many controllers disallow HCI Create Connection while it is doing
+ * HCI Inquiry. So we cancel the Inquiry first before issuing HCI Create
+ * Connection. This may cause the MGMT discovering state to become false
+ * without user space's request but it is okay since the MGMT Discovery
+ * APIs do not promise that discovery should be done forever. Instead,
+ * the user space monitors the status of MGMT discovering and it may
+ * request for discovery again when this flag becomes false.
+ */
+ if (test_bit(HCI_INQUIRY, &hdev->flags)) {
+ err = __hci_cmd_sync_status(hdev, HCI_OP_INQUIRY_CANCEL, 0,
+ NULL, HCI_CMD_TIMEOUT);
+ if (err)
+ bt_dev_warn(hdev, "Failed to cancel inquiry %d", err);
+ }
+
+ conn->state = BT_CONNECT;
+ conn->out = true;
+ conn->role = HCI_ROLE_MASTER;
+
+ conn->attempt++;
+
+ conn->link_policy = hdev->link_policy;
+
+ memset(&cp, 0, sizeof(cp));
+ bacpy(&cp.bdaddr, &conn->dst);
+ cp.pscan_rep_mode = 0x02;
+
+ ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
+ if (ie) {
+ if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
+ cp.pscan_rep_mode = ie->data.pscan_rep_mode;
+ cp.pscan_mode = ie->data.pscan_mode;
+ cp.clock_offset = ie->data.clock_offset |
+ cpu_to_le16(0x8000);
+ }
+
+ memcpy(conn->dev_class, ie->data.dev_class, 3);
+ }
+
+ cp.pkt_type = cpu_to_le16(conn->pkt_type);
+ if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
+ cp.role_switch = 0x01;
+ else
+ cp.role_switch = 0x00;
+
+ return __hci_cmd_sync_status_sk(hdev, HCI_OP_CREATE_CONN,
+ sizeof(cp), &cp,
+ HCI_EV_CONN_COMPLETE,
+ conn->conn_timeout, NULL);
+}
+
+int hci_connect_acl_sync(struct hci_dev *hdev, struct hci_conn *conn)
+{
+ return hci_cmd_sync_queue_once(hdev, hci_acl_create_conn_sync, conn,
+ NULL);
+}
+
+static void create_le_conn_complete(struct hci_dev *hdev, void *data, int err)
+{
+ struct hci_conn *conn = data;
+
+ bt_dev_dbg(hdev, "err %d", err);
+
+ if (err == -ECANCELED)
+ return;
+
+ hci_dev_lock(hdev);
+
+ if (!hci_conn_valid(hdev, conn))
+ goto done;
+
+ if (!err) {
+ hci_connect_le_scan_cleanup(conn, 0x00);
+ goto done;
+ }
+
+ /* Check if connection is still pending */
+ if (conn != hci_lookup_le_connect(hdev))
+ goto done;
+
+ /* Flush to make sure we send create conn cancel command if needed */
+ flush_delayed_work(&conn->le_conn_timeout);
+ hci_conn_failed(conn, bt_status(err));
+
+done:
+ hci_dev_unlock(hdev);
+}
+
+int hci_connect_le_sync(struct hci_dev *hdev, struct hci_conn *conn)
+{
+ return hci_cmd_sync_queue_once(hdev, hci_le_create_conn_sync, conn,
+ create_le_conn_complete);
+}
+
+int hci_cancel_connect_sync(struct hci_dev *hdev, struct hci_conn *conn)
+{
+ if (conn->state != BT_OPEN)
+ return -EINVAL;
+
+ switch (conn->type) {
+ case ACL_LINK:
+ return !hci_cmd_sync_dequeue_once(hdev,
+ hci_acl_create_conn_sync,
+ conn, NULL);
+ case LE_LINK:
+ return !hci_cmd_sync_dequeue_once(hdev, hci_le_create_conn_sync,
+ conn, create_le_conn_complete);
+ }
+
+ return -ENOENT;
+}
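
The two entry points above depend on hci_cmd_sync_queue_once() and hci_cmd_sync_dequeue_once() pairing up: a connection attempt is queued at most once per (callback, conn) pair, and cancellation succeeds only while that entry is still pending. A self-contained userspace sketch of that pairing, using a purely illustrative linked-list queue (none of these names are kernel API):

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef int (*work_fn)(void *data);

    struct work_entry {
        work_fn func;
        void *data;
        struct work_entry *next;
    };

    static struct work_entry *pending;

    /* Queue (func, data) unless an identical pair is already pending. */
    static bool queue_once(work_fn func, void *data)
    {
        struct work_entry *e;

        for (e = pending; e; e = e->next)
            if (e->func == func && e->data == data)
                return false;

        e = malloc(sizeof(*e));
        if (!e)
            return false;
        e->func = func;
        e->data = data;
        e->next = pending;
        pending = e;
        return true;
    }

    /* Remove a pending (func, data) pair; false means it already ran
     * or was never queued, so it can no longer be cancelled. */
    static bool dequeue_once(work_fn func, void *data)
    {
        struct work_entry **p, *e;

        for (p = &pending; (e = *p); p = &e->next) {
            if (e->func != func || e->data != data)
                continue;
            *p = e->next;
            free(e);
            return true;
        }
        return false;
    }

    static int connect_work(void *data) { (void)data; return 0; }

    int main(void)
    {
        int conn;

        printf("queued:   %d\n", queue_once(connect_work, &conn));   /* 1 */
        printf("queued:   %d\n", queue_once(connect_work, &conn));   /* 0 */
        printf("dequeued: %d\n", dequeue_once(connect_work, &conn)); /* 1 */
        return 0;
    }

Note that hci_cancel_connect_sync() above returns the negation of the dequeue result, so 0 indicates the pending attempt was successfully removed.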
diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c
index 04f6572d35f1..c8793e57f4b5 100644
--- a/net/bluetooth/iso.c
+++ b/net/bluetooth/iso.c
@@ -3,7 +3,7 @@
* BlueZ - Bluetooth protocol stack for Linux
*
* Copyright (C) 2022 Intel Corporation
- * Copyright 2023 NXP
+ * Copyright 2023-2024 NXP
*/
#include <linux/module.h>
@@ -690,11 +690,8 @@ static void iso_sock_cleanup_listen(struct sock *parent)
iso_sock_kill(sk);
}
- /* If listening socket stands for a PA sync connection,
- * properly disconnect the hcon and socket.
- */
- if (iso_pi(parent)->conn && iso_pi(parent)->conn->hcon &&
- test_bit(HCI_CONN_PA_SYNC, &iso_pi(parent)->conn->hcon->flags)) {
+ /* If the listening socket has an hcon, disconnect it properly */
+ if (iso_pi(parent)->conn && iso_pi(parent)->conn->hcon) {
iso_sock_disconn(parent);
return;
}
@@ -837,10 +834,10 @@ static struct bt_iso_qos default_qos = {
.bcode = {0x00},
.options = 0x00,
.skip = 0x0000,
- .sync_timeout = 0x4000,
+ .sync_timeout = BT_ISO_SYNC_TIMEOUT,
.sync_cte_type = 0x00,
.mse = 0x00,
- .timeout = 0x4000,
+ .timeout = BT_ISO_SYNC_TIMEOUT,
},
};
@@ -1076,6 +1073,8 @@ static int iso_listen_bis(struct sock *sk)
{
struct hci_dev *hdev;
int err = 0;
+ struct iso_conn *conn;
+ struct hci_conn *hcon;
BT_DBG("%pMR -> %pMR (SID 0x%2.2x)", &iso_pi(sk)->src,
&iso_pi(sk)->dst, iso_pi(sk)->bc_sid);
@@ -1096,18 +1095,40 @@ static int iso_listen_bis(struct sock *sk)
if (!hdev)
return -EHOSTUNREACH;
+ hci_dev_lock(hdev);
+
/* Fail if user set invalid QoS */
if (iso_pi(sk)->qos_user_set && !check_bcast_qos(&iso_pi(sk)->qos)) {
iso_pi(sk)->qos = default_qos;
- return -EINVAL;
+ err = -EINVAL;
+ goto unlock;
+ }
+
+ hcon = hci_pa_create_sync(hdev, &iso_pi(sk)->dst,
+ le_addr_type(iso_pi(sk)->dst_type),
+ iso_pi(sk)->bc_sid, &iso_pi(sk)->qos);
+ if (IS_ERR(hcon)) {
+ err = PTR_ERR(hcon);
+ goto unlock;
}
- err = hci_pa_create_sync(hdev, &iso_pi(sk)->dst,
- le_addr_type(iso_pi(sk)->dst_type),
- iso_pi(sk)->bc_sid, &iso_pi(sk)->qos);
+ conn = iso_conn_add(hcon);
+ if (!conn) {
+ hci_conn_drop(hcon);
+ err = -ENOMEM;
+ goto unlock;
+ }
+
+ err = iso_chan_add(conn, sk, NULL);
+ if (err) {
+ hci_conn_drop(hcon);
+ goto unlock;
+ }
hci_dev_put(hdev);
+unlock:
+ hci_dev_unlock(hdev);
return err;
}
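
hci_pa_create_sync() now returns a struct hci_conn pointer rather than an int, so iso_listen_bis() switches to the kernel's IS_ERR()/PTR_ERR() convention, in which negative errno values are encoded into the top page of the pointer range. A standalone sketch of that convention, simplified from the kernel's err.h (the fake constructor is illustrative):

    #include <errno.h>
    #include <stdio.h>

    #define MAX_ERRNO 4095

    static void *ERR_PTR(long err) { return (void *)err; }
    static long PTR_ERR(const void *p) { return (long)p; }
    static int IS_ERR(const void *p)
    {
        return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
    }

    static int the_object = 42;

    /* A fake constructor: either a valid object or an encoded errno. */
    static void *create_sync(int fail)
    {
        return fail ? ERR_PTR(-ENOMEM) : &the_object;
    }

    int main(void)
    {
        void *obj = create_sync(1);

        if (IS_ERR(obj)) {
            printf("err %ld\n", PTR_ERR(obj));  /* -12 (ENOMEM) */
            return 1;
        }
        return 0;
    }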
@@ -1889,7 +1910,6 @@ int iso_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags)
struct hci_evt_le_big_info_adv_report *ev2;
struct hci_ev_le_per_adv_report *ev3;
struct sock *sk;
- int lm = 0;
bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
@@ -1933,7 +1953,7 @@ int iso_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags)
if (sk && test_bit(BT_SK_PA_SYNC_TERM,
&iso_pi(sk)->flags))
- return lm;
+ return 0;
}
if (sk) {
@@ -1961,16 +1981,58 @@ int iso_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags)
ev3 = hci_recv_event_data(hdev, HCI_EV_LE_PER_ADV_REPORT);
if (ev3) {
- size_t base_len = ev3->length;
+ size_t base_len = 0;
u8 *base;
+ struct hci_conn *hcon;
sk = iso_get_sock_listen(&hdev->bdaddr, bdaddr,
iso_match_sync_handle_pa_report, ev3);
- base = eir_get_service_data(ev3->data, ev3->length,
- EIR_BAA_SERVICE_UUID, &base_len);
- if (base && sk && base_len <= sizeof(iso_pi(sk)->base)) {
+ if (!sk)
+ goto done;
+
+ hcon = iso_pi(sk)->conn->hcon;
+ if (!hcon)
+ goto done;
+
+ if (ev3->data_status == LE_PA_DATA_TRUNCATED) {
+ /* The controller was unable to retrieve PA data. */
+ memset(hcon->le_per_adv_data, 0,
+ HCI_MAX_PER_AD_TOT_LEN);
+ hcon->le_per_adv_data_len = 0;
+ hcon->le_per_adv_data_offset = 0;
+ goto done;
+ }
+
+ if (hcon->le_per_adv_data_offset + ev3->length >
+ HCI_MAX_PER_AD_TOT_LEN)
+ goto done;
+
+ memcpy(hcon->le_per_adv_data + hcon->le_per_adv_data_offset,
+ ev3->data, ev3->length);
+ hcon->le_per_adv_data_offset += ev3->length;
+
+ if (ev3->data_status == LE_PA_DATA_COMPLETE) {
+ /* All PA data has been received. */
+ hcon->le_per_adv_data_len =
+ hcon->le_per_adv_data_offset;
+ hcon->le_per_adv_data_offset = 0;
+
+ /* Extract BASE */
+ base = eir_get_service_data(hcon->le_per_adv_data,
+ hcon->le_per_adv_data_len,
+ EIR_BAA_SERVICE_UUID,
+ &base_len);
+
+ if (!base || base_len > BASE_MAX_LENGTH)
+ goto done;
+
memcpy(iso_pi(sk)->base, base, base_len);
iso_pi(sk)->base_len = base_len;
+ } else {
+ /* This is a PA data fragment. Keep le_per_adv_data_len
+ * set to 0 until all fragments have been reassembled.
+ */
+ hcon->le_per_adv_data_len = 0;
}
} else {
sk = iso_get_sock_listen(&hdev->bdaddr, BDADDR_ANY, NULL, NULL);
@@ -1978,16 +2040,14 @@ int iso_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags)
done:
if (!sk)
- return lm;
-
- lm |= HCI_LM_ACCEPT;
+ return 0;
if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
*flags |= HCI_PROTO_DEFER;
sock_put(sk);
- return lm;
+ return HCI_LM_ACCEPT;
}
static void iso_connect_cfm(struct hci_conn *hcon, __u8 status)
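
The per-advertising hunk above turns report handling into a small reassembly state machine: fragments accumulate at le_per_adv_data_offset, a COMPLETE status commits the total length, and a TRUNCATED status resets the buffer. A hedged, self-contained model of that accumulate/commit/reset logic; the constants and names are illustrative stand-ins for the kernel's:

    #include <stdio.h>
    #include <string.h>

    #define PA_TOT_LEN 64  /* stand-in for HCI_MAX_PER_AD_TOT_LEN */

    enum pa_status { PA_PARTIAL, PA_COMPLETE, PA_TRUNCATED };

    struct pa_buf {
        unsigned char data[PA_TOT_LEN];
        size_t len;     /* valid only once a full train has been seen */
        size_t offset;  /* write position while fragments accumulate */
    };

    /* Feed one report fragment, mirroring the hunk's three cases. */
    static void pa_feed(struct pa_buf *b, enum pa_status st,
                        const unsigned char *frag, size_t flen)
    {
        if (st == PA_TRUNCATED) {
            /* Controller could not retrieve the data: start over. */
            memset(b->data, 0, sizeof(b->data));
            b->len = 0;
            b->offset = 0;
            return;
        }

        if (b->offset + flen > sizeof(b->data))
            return;  /* would overflow: ignore, as the kernel does */

        memcpy(b->data + b->offset, frag, flen);
        b->offset += flen;

        if (st == PA_COMPLETE) {
            b->len = b->offset;  /* commit the reassembled length */
            b->offset = 0;       /* ready for the next train */
        } else {
            b->len = 0;          /* still reassembling */
        }
    }

    int main(void)
    {
        struct pa_buf b = { 0 };

        pa_feed(&b, PA_PARTIAL, (const unsigned char *)"BA", 2);
        pa_feed(&b, PA_COMPLETE, (const unsigned char *)"SE", 2);
        printf("reassembled %zu bytes\n", b.len);  /* 4 */
        return 0;
    }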
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 656f49b299d2..467b242d8be0 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -39,8 +39,6 @@
#include <net/bluetooth/l2cap.h>
#include "smp.h"
-#include "a2mp.h"
-#include "amp.h"
#define LE_FLOWCTL_MAX_CREDITS 65535
@@ -167,24 +165,6 @@ static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
return NULL;
}
-static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
- u8 ident)
-{
- struct l2cap_chan *c;
-
- mutex_lock(&conn->chan_lock);
- c = __l2cap_get_chan_by_ident(conn, ident);
- if (c) {
- /* Only lock if chan reference is not 0 */
- c = l2cap_chan_hold_unless_zero(c);
- if (c)
- l2cap_chan_lock(c);
- }
- mutex_unlock(&conn->chan_lock);
-
- return c;
-}
-
static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src,
u8 src_type)
{
@@ -651,7 +631,6 @@ void l2cap_chan_del(struct l2cap_chan *chan, int err)
chan->ops->teardown(chan, err);
if (conn) {
- struct amp_mgr *mgr = conn->hcon->amp_mgr;
/* Delete from channel list */
list_del(&chan->list);
@@ -666,16 +645,6 @@ void l2cap_chan_del(struct l2cap_chan *chan, int err)
if (chan->chan_type != L2CAP_CHAN_FIXED ||
test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
hci_conn_drop(conn->hcon);
-
- if (mgr && mgr->bredr_chan == chan)
- mgr->bredr_chan = NULL;
- }
-
- if (chan->hs_hchan) {
- struct hci_chan *hs_hchan = chan->hs_hchan;
-
- BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
- amp_disconnect_logical_link(hs_hchan);
}
if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
@@ -977,12 +946,6 @@ static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
hci_send_acl(conn->hchan, skb, flags);
}
-static bool __chan_is_moving(struct l2cap_chan *chan)
-{
- return chan->move_state != L2CAP_MOVE_STABLE &&
- chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
-}
-
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
struct hci_conn *hcon = chan->conn->hcon;
@@ -991,15 +954,6 @@ static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
skb->priority);
- if (chan->hs_hcon && !__chan_is_moving(chan)) {
- if (chan->hs_hchan)
- hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
- else
- kfree_skb(skb);
-
- return;
- }
-
/* Use NO_FLUSH for LE links (where this is the only option) or
* if the BR/EDR link supports it and flushing has not been
* explicitly requested (through FLAG_FLUSHABLE).
@@ -1180,9 +1134,6 @@ static void l2cap_send_sframe(struct l2cap_chan *chan,
if (!control->sframe)
return;
- if (__chan_is_moving(chan))
- return;
-
if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
!control->poll)
control->final = 1;
@@ -1237,40 +1188,6 @@ static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
}
-static bool __amp_capable(struct l2cap_chan *chan)
-{
- struct l2cap_conn *conn = chan->conn;
- struct hci_dev *hdev;
- bool amp_available = false;
-
- if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
- return false;
-
- if (!(conn->remote_fixed_chan & L2CAP_FC_A2MP))
- return false;
-
- read_lock(&hci_dev_list_lock);
- list_for_each_entry(hdev, &hci_dev_list, list) {
- if (hdev->amp_type != AMP_TYPE_BREDR &&
- test_bit(HCI_UP, &hdev->flags)) {
- amp_available = true;
- break;
- }
- }
- read_unlock(&hci_dev_list_lock);
-
- if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
- return amp_available;
-
- return false;
-}
-
-static bool l2cap_check_efs(struct l2cap_chan *chan)
-{
- /* Check EFS parameters */
- return true;
-}
-
void l2cap_send_conn_req(struct l2cap_chan *chan)
{
struct l2cap_conn *conn = chan->conn;
@@ -1286,76 +1203,6 @@ void l2cap_send_conn_req(struct l2cap_chan *chan)
l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
}
-static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
-{
- struct l2cap_create_chan_req req;
- req.scid = cpu_to_le16(chan->scid);
- req.psm = chan->psm;
- req.amp_id = amp_id;
-
- chan->ident = l2cap_get_ident(chan->conn);
-
- l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
- sizeof(req), &req);
-}
-
-static void l2cap_move_setup(struct l2cap_chan *chan)
-{
- struct sk_buff *skb;
-
- BT_DBG("chan %p", chan);
-
- if (chan->mode != L2CAP_MODE_ERTM)
- return;
-
- __clear_retrans_timer(chan);
- __clear_monitor_timer(chan);
- __clear_ack_timer(chan);
-
- chan->retry_count = 0;
- skb_queue_walk(&chan->tx_q, skb) {
- if (bt_cb(skb)->l2cap.retries)
- bt_cb(skb)->l2cap.retries = 1;
- else
- break;
- }
-
- chan->expected_tx_seq = chan->buffer_seq;
-
- clear_bit(CONN_REJ_ACT, &chan->conn_state);
- clear_bit(CONN_SREJ_ACT, &chan->conn_state);
- l2cap_seq_list_clear(&chan->retrans_list);
- l2cap_seq_list_clear(&chan->srej_list);
- skb_queue_purge(&chan->srej_q);
-
- chan->tx_state = L2CAP_TX_STATE_XMIT;
- chan->rx_state = L2CAP_RX_STATE_MOVE;
-
- set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
-}
-
-static void l2cap_move_done(struct l2cap_chan *chan)
-{
- u8 move_role = chan->move_role;
- BT_DBG("chan %p", chan);
-
- chan->move_state = L2CAP_MOVE_STABLE;
- chan->move_role = L2CAP_MOVE_ROLE_NONE;
-
- if (chan->mode != L2CAP_MODE_ERTM)
- return;
-
- switch (move_role) {
- case L2CAP_MOVE_ROLE_INITIATOR:
- l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
- chan->rx_state = L2CAP_RX_STATE_WAIT_F;
- break;
- case L2CAP_MOVE_ROLE_RESPONDER:
- chan->rx_state = L2CAP_RX_STATE_WAIT_P;
- break;
- }
-}
-
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
/* The channel may have already been flagged as connected in
@@ -1505,10 +1352,7 @@ static void l2cap_le_start(struct l2cap_chan *chan)
static void l2cap_start_connection(struct l2cap_chan *chan)
{
- if (__amp_capable(chan)) {
- BT_DBG("chan %p AMP capable: discover AMPs", chan);
- a2mp_discover_amp(chan);
- } else if (chan->conn->hcon->type == LE_LINK) {
+ if (chan->conn->hcon->type == LE_LINK) {
l2cap_le_start(chan);
} else {
l2cap_send_conn_req(chan);
@@ -1611,11 +1455,6 @@ static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
__clear_ack_timer(chan);
}
- if (chan->scid == L2CAP_CID_A2MP) {
- l2cap_state_change(chan, BT_DISCONN);
- return;
- }
-
req.dcid = cpu_to_le16(chan->dcid);
req.scid = cpu_to_le16(chan->scid);
l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
@@ -1754,11 +1593,6 @@ static void l2cap_conn_ready(struct l2cap_conn *conn)
l2cap_chan_lock(chan);
- if (chan->scid == L2CAP_CID_A2MP) {
- l2cap_chan_unlock(chan);
- continue;
- }
-
if (hcon->type == LE_LINK) {
l2cap_le_start(chan);
} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
@@ -2067,9 +1901,6 @@ static void l2cap_streaming_send(struct l2cap_chan *chan,
BT_DBG("chan %p, skbs %p", chan, skbs);
- if (__chan_is_moving(chan))
- return;
-
skb_queue_splice_tail_init(skbs, &chan->tx_q);
while (!skb_queue_empty(&chan->tx_q)) {
@@ -2112,9 +1943,6 @@ static int l2cap_ertm_send(struct l2cap_chan *chan)
if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
return 0;
- if (__chan_is_moving(chan))
- return 0;
-
while (chan->tx_send_head &&
chan->unacked_frames < chan->remote_tx_win &&
chan->tx_state == L2CAP_TX_STATE_XMIT) {
@@ -2180,9 +2008,6 @@ static void l2cap_ertm_resend(struct l2cap_chan *chan)
if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
return;
- if (__chan_is_moving(chan))
- return;
-
while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
seq = l2cap_seq_list_pop(&chan->retrans_list);
@@ -2522,8 +2347,7 @@ static int l2cap_segment_sdu(struct l2cap_chan *chan,
pdu_len = chan->conn->mtu;
/* Constrain PDU size for BR/EDR connections */
- if (!chan->hs_hcon)
- pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
+ pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
/* Adjust for largest possible L2CAP overhead. */
if (chan->fcs)
@@ -3287,11 +3111,6 @@ int l2cap_ertm_init(struct l2cap_chan *chan)
skb_queue_head_init(&chan->tx_q);
- chan->local_amp_id = AMP_ID_BREDR;
- chan->move_id = AMP_ID_BREDR;
- chan->move_state = L2CAP_MOVE_STABLE;
- chan->move_role = L2CAP_MOVE_ROLE_NONE;
-
if (chan->mode != L2CAP_MODE_ERTM)
return 0;
@@ -3326,52 +3145,19 @@ static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
{
- return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
- (conn->feat_mask & L2CAP_FEAT_EXT_WINDOW));
+ return (conn->feat_mask & L2CAP_FEAT_EXT_WINDOW);
}
static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
{
- return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
- (conn->feat_mask & L2CAP_FEAT_EXT_FLOW));
+ return (conn->feat_mask & L2CAP_FEAT_EXT_FLOW);
}
static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
struct l2cap_conf_rfc *rfc)
{
- if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
- u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
-
- * Class 1 devices must have ERTM timeouts
- * exceeding the Link Supervision Timeout. The
- * default Link Supervision Timeout for AMP
- * controllers is 10 seconds.
- *
- * Class 1 devices use 0xffffffff for their
- * best-effort flush timeout, so the clamping logic
- * will result in a timeout that meets the above
- * requirement. ERTM timeouts are 16-bit values, so
- * the maximum timeout is 65.535 seconds.
- */
-
- /* Convert timeout to milliseconds and round */
- ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
-
- /* This is the recommended formula for class 2 devices
- * that start ERTM timers when packets are sent to the
- * controller.
- */
- ertm_to = 3 * ertm_to + 500;
-
- if (ertm_to > 0xffff)
- ertm_to = 0xffff;
-
- rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
- rfc->monitor_timeout = rfc->retrans_timeout;
- } else {
- rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
- rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
- }
+ rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
+ rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
}
static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
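
The branch deleted above documented the AMP ERTM timeout derivation: convert the controller's best-effort flush timeout to milliseconds (rounding up), apply the recommended class 2 formula 3*t + 500, and clamp to the 16-bit maximum. A small self-contained check of that arithmetic, with an illustrative function name:

    #include <stdint.h>
    #include <stdio.h>

    static uint16_t amp_ertm_timeout_ms(uint64_t flush_to_us)
    {
        uint64_t ms = (flush_to_us + 999) / 1000;  /* round up to ms */

        ms = 3 * ms + 500;  /* recommended class 2 formula */
        return ms > 0xffff ? 0xffff : (uint16_t)ms;
    }

    int main(void)
    {
        /* A class 1 best-effort flush timeout of 0xffffffff us clamps
         * to the 16-bit maximum of 65535 ms, i.e. 65.535 seconds. */
        printf("%u\n", amp_ertm_timeout_ms(0xffffffffULL));
        return 0;
    }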
@@ -3623,13 +3409,7 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data
case L2CAP_CONF_EWS:
if (olen != 2)
break;
- if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP))
- return -ECONNREFUSED;
- set_bit(FLAG_EXT_CTRL, &chan->flags);
- set_bit(CONF_EWS_RECV, &chan->conf_state);
- chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
- chan->remote_tx_win = val;
- break;
+ return -ECONNREFUSED;
default:
if (hint)
@@ -4027,11 +3807,7 @@ void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
rsp.dcid = cpu_to_le16(chan->scid);
rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
-
- if (chan->hs_hcon)
- rsp_code = L2CAP_CREATE_CHAN_RSP;
- else
- rsp_code = L2CAP_CONN_RSP;
+ rsp_code = L2CAP_CONN_RSP;
BT_DBG("chan %p rsp_code %u", chan, rsp_code);
@@ -4190,7 +3966,6 @@ static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
chan->dst_type = bdaddr_dst_type(conn->hcon);
chan->psm = psm;
chan->dcid = scid;
- chan->local_amp_id = amp_id;
__l2cap_chan_add(conn, chan);
@@ -4516,10 +4291,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn,
/* check compatibility */
/* Send rsp for BR/EDR channel */
- if (!chan->hs_hcon)
- l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
- else
- chan->ident = cmd->ident;
+ l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
}
unlock:
@@ -4571,15 +4343,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn,
goto done;
}
- if (!chan->hs_hcon) {
- l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
- 0);
- } else {
- if (l2cap_check_efs(chan)) {
- amp_create_logical_link(chan);
- chan->ident = cmd->ident;
- }
- }
+ l2cap_send_efs_conf_rsp(chan, buf, cmd->ident, 0);
}
goto done;
@@ -4750,9 +4514,6 @@ static inline int l2cap_information_req(struct l2cap_conn *conn,
if (!disable_ertm)
feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
| L2CAP_FEAT_FCS;
- if (conn->local_fixed_chan & L2CAP_FC_A2MP)
- feat_mask |= L2CAP_FEAT_EXT_FLOW
- | L2CAP_FEAT_EXT_WINDOW;
put_unaligned_le32(feat_mask, rsp->data);
l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
@@ -4841,751 +4602,6 @@ static inline int l2cap_information_rsp(struct l2cap_conn *conn,
return 0;
}
-static int l2cap_create_channel_req(struct l2cap_conn *conn,
- struct l2cap_cmd_hdr *cmd,
- u16 cmd_len, void *data)
-{
- struct l2cap_create_chan_req *req = data;
- struct l2cap_create_chan_rsp rsp;
- struct l2cap_chan *chan;
- struct hci_dev *hdev;
- u16 psm, scid;
-
- if (cmd_len != sizeof(*req))
- return -EPROTO;
-
- if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
- return -EINVAL;
-
- psm = le16_to_cpu(req->psm);
- scid = le16_to_cpu(req->scid);
-
- BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
-
- /* For controller id 0 make BR/EDR connection */
- if (req->amp_id == AMP_ID_BREDR) {
- l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
- req->amp_id);
- return 0;
- }
-
- /* Validate AMP controller id */
- hdev = hci_dev_get(req->amp_id);
- if (!hdev)
- goto error;
-
- if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
- hci_dev_put(hdev);
- goto error;
- }
-
- chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
- req->amp_id);
- if (chan) {
- struct amp_mgr *mgr = conn->hcon->amp_mgr;
- struct hci_conn *hs_hcon;
-
- hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
- &conn->hcon->dst);
- if (!hs_hcon) {
- hci_dev_put(hdev);
- cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
- chan->dcid);
- return 0;
- }
-
- BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
-
- mgr->bredr_chan = chan;
- chan->hs_hcon = hs_hcon;
- chan->fcs = L2CAP_FCS_NONE;
- conn->mtu = hdev->block_mtu;
- }
-
- hci_dev_put(hdev);
-
- return 0;
-
-error:
- rsp.dcid = 0;
- rsp.scid = cpu_to_le16(scid);
- rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
- rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
-
- l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
- sizeof(rsp), &rsp);
-
- return 0;
-}
-
-static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
-{
- struct l2cap_move_chan_req req;
- u8 ident;
-
- BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
-
- ident = l2cap_get_ident(chan->conn);
- chan->ident = ident;
-
- req.icid = cpu_to_le16(chan->scid);
- req.dest_amp_id = dest_amp_id;
-
- l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
- &req);
-
- __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
-}
-
-static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
-{
- struct l2cap_move_chan_rsp rsp;
-
- BT_DBG("chan %p, result 0x%4.4x", chan, result);
-
- rsp.icid = cpu_to_le16(chan->dcid);
- rsp.result = cpu_to_le16(result);
-
- l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
- sizeof(rsp), &rsp);
-}
-
-static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
-{
- struct l2cap_move_chan_cfm cfm;
-
- BT_DBG("chan %p, result 0x%4.4x", chan, result);
-
- chan->ident = l2cap_get_ident(chan->conn);
-
- cfm.icid = cpu_to_le16(chan->scid);
- cfm.result = cpu_to_le16(result);
-
- l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
- sizeof(cfm), &cfm);
-
- __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
-}
-
-static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
-{
- struct l2cap_move_chan_cfm cfm;
-
- BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
-
- cfm.icid = cpu_to_le16(icid);
- cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
-
- l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
- sizeof(cfm), &cfm);
-}
-
-static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
- u16 icid)
-{
- struct l2cap_move_chan_cfm_rsp rsp;
-
- BT_DBG("icid 0x%4.4x", icid);
-
- rsp.icid = cpu_to_le16(icid);
- l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
-}
-
-static void __release_logical_link(struct l2cap_chan *chan)
-{
- chan->hs_hchan = NULL;
- chan->hs_hcon = NULL;
-
- /* Placeholder - release the logical link */
-}
-
-static void l2cap_logical_fail(struct l2cap_chan *chan)
-{
- /* Logical link setup failed */
- if (chan->state != BT_CONNECTED) {
- /* Create channel failure, disconnect */
- l2cap_send_disconn_req(chan, ECONNRESET);
- return;
- }
-
- switch (chan->move_role) {
- case L2CAP_MOVE_ROLE_RESPONDER:
- l2cap_move_done(chan);
- l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
- break;
- case L2CAP_MOVE_ROLE_INITIATOR:
- if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
- chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
- /* Remote has only sent pending or
- * success responses, clean up
- */
- l2cap_move_done(chan);
- }
-
- /* Other amp move states imply that the move
- * has already aborted
- */
- l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
- break;
- }
-}
-
-static void l2cap_logical_finish_create(struct l2cap_chan *chan,
- struct hci_chan *hchan)
-{
- struct l2cap_conf_rsp rsp;
-
- chan->hs_hchan = hchan;
- chan->hs_hcon->l2cap_data = chan->conn;
-
- l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
-
- if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
- int err;
-
- set_default_fcs(chan);
-
- err = l2cap_ertm_init(chan);
- if (err < 0)
- l2cap_send_disconn_req(chan, -err);
- else
- l2cap_chan_ready(chan);
- }
-}
-
-static void l2cap_logical_finish_move(struct l2cap_chan *chan,
- struct hci_chan *hchan)
-{
- chan->hs_hcon = hchan->conn;
- chan->hs_hcon->l2cap_data = chan->conn;
-
- BT_DBG("move_state %d", chan->move_state);
-
- switch (chan->move_state) {
- case L2CAP_MOVE_WAIT_LOGICAL_COMP:
- /* Move confirm will be sent after a success
- * response is received
- */
- chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
- break;
- case L2CAP_MOVE_WAIT_LOGICAL_CFM:
- if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
- chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
- } else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
- chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
- l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
- } else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
- chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
- l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
- }
- break;
- default:
- /* Move was not in expected state, free the channel */
- __release_logical_link(chan);
-
- chan->move_state = L2CAP_MOVE_STABLE;
- }
-}
-
-/* Call with chan locked */
-void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
- u8 status)
-{
- BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
-
- if (status) {
- l2cap_logical_fail(chan);
- __release_logical_link(chan);
- return;
- }
-
- if (chan->state != BT_CONNECTED) {
- /* Ignore logical link if channel is on BR/EDR */
- if (chan->local_amp_id != AMP_ID_BREDR)
- l2cap_logical_finish_create(chan, hchan);
- } else {
- l2cap_logical_finish_move(chan, hchan);
- }
-}
-
-void l2cap_move_start(struct l2cap_chan *chan)
-{
- BT_DBG("chan %p", chan);
-
- if (chan->local_amp_id == AMP_ID_BREDR) {
- if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
- return;
- chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
- chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
- /* Placeholder - start physical link setup */
- } else {
- chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
- chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
- chan->move_id = 0;
- l2cap_move_setup(chan);
- l2cap_send_move_chan_req(chan, 0);
- }
-}
-
-static void l2cap_do_create(struct l2cap_chan *chan, int result,
- u8 local_amp_id, u8 remote_amp_id)
-{
- BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
- local_amp_id, remote_amp_id);
-
- chan->fcs = L2CAP_FCS_NONE;
-
- /* Outgoing channel on AMP */
- if (chan->state == BT_CONNECT) {
- if (result == L2CAP_CR_SUCCESS) {
- chan->local_amp_id = local_amp_id;
- l2cap_send_create_chan_req(chan, remote_amp_id);
- } else {
- /* Revert to BR/EDR connect */
- l2cap_send_conn_req(chan);
- }
-
- return;
- }
-
- /* Incoming channel on AMP */
- if (__l2cap_no_conn_pending(chan)) {
- struct l2cap_conn_rsp rsp;
- char buf[128];
- rsp.scid = cpu_to_le16(chan->dcid);
- rsp.dcid = cpu_to_le16(chan->scid);
-
- if (result == L2CAP_CR_SUCCESS) {
- /* Send successful response */
- rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
- rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
- } else {
- /* Send negative response */
- rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
- rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
- }
-
- l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
- sizeof(rsp), &rsp);
-
- if (result == L2CAP_CR_SUCCESS) {
- l2cap_state_change(chan, BT_CONFIG);
- set_bit(CONF_REQ_SENT, &chan->conf_state);
- l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
- L2CAP_CONF_REQ,
- l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
- chan->num_conf_req++;
- }
- }
-}
-
-static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
- u8 remote_amp_id)
-{
- l2cap_move_setup(chan);
- chan->move_id = local_amp_id;
- chan->move_state = L2CAP_MOVE_WAIT_RSP;
-
- l2cap_send_move_chan_req(chan, remote_amp_id);
-}
-
-static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
-{
- struct hci_chan *hchan = NULL;
-
- /* Placeholder - get hci_chan for logical link */
-
- if (hchan) {
- if (hchan->state == BT_CONNECTED) {
- /* Logical link is ready to go */
- chan->hs_hcon = hchan->conn;
- chan->hs_hcon->l2cap_data = chan->conn;
- chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
- l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
-
- l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
- } else {
- /* Wait for logical link to be ready */
- chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
- }
- } else {
- /* Logical link not available */
- l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
- }
-}
-
-static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
-{
- if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
- u8 rsp_result;
- if (result == -EINVAL)
- rsp_result = L2CAP_MR_BAD_ID;
- else
- rsp_result = L2CAP_MR_NOT_ALLOWED;
-
- l2cap_send_move_chan_rsp(chan, rsp_result);
- }
-
- chan->move_role = L2CAP_MOVE_ROLE_NONE;
- chan->move_state = L2CAP_MOVE_STABLE;
-
- /* Restart data transmission */
- l2cap_ertm_send(chan);
-}
-
-/* Invoke with locked chan */
-void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
-{
- u8 local_amp_id = chan->local_amp_id;
- u8 remote_amp_id = chan->remote_amp_id;
-
- BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
- chan, result, local_amp_id, remote_amp_id);
-
- if (chan->state == BT_DISCONN || chan->state == BT_CLOSED)
- return;
-
- if (chan->state != BT_CONNECTED) {
- l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
- } else if (result != L2CAP_MR_SUCCESS) {
- l2cap_do_move_cancel(chan, result);
- } else {
- switch (chan->move_role) {
- case L2CAP_MOVE_ROLE_INITIATOR:
- l2cap_do_move_initiate(chan, local_amp_id,
- remote_amp_id);
- break;
- case L2CAP_MOVE_ROLE_RESPONDER:
- l2cap_do_move_respond(chan, result);
- break;
- default:
- l2cap_do_move_cancel(chan, result);
- break;
- }
- }
-}
-
-static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
- struct l2cap_cmd_hdr *cmd,
- u16 cmd_len, void *data)
-{
- struct l2cap_move_chan_req *req = data;
- struct l2cap_move_chan_rsp rsp;
- struct l2cap_chan *chan;
- u16 icid = 0;
- u16 result = L2CAP_MR_NOT_ALLOWED;
-
- if (cmd_len != sizeof(*req))
- return -EPROTO;
-
- icid = le16_to_cpu(req->icid);
-
- BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
-
- if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
- return -EINVAL;
-
- chan = l2cap_get_chan_by_dcid(conn, icid);
- if (!chan) {
- rsp.icid = cpu_to_le16(icid);
- rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
- l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
- sizeof(rsp), &rsp);
- return 0;
- }
-
- chan->ident = cmd->ident;
-
- if (chan->scid < L2CAP_CID_DYN_START ||
- chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
- (chan->mode != L2CAP_MODE_ERTM &&
- chan->mode != L2CAP_MODE_STREAMING)) {
- result = L2CAP_MR_NOT_ALLOWED;
- goto send_move_response;
- }
-
- if (chan->local_amp_id == req->dest_amp_id) {
- result = L2CAP_MR_SAME_ID;
- goto send_move_response;
- }
-
- if (req->dest_amp_id != AMP_ID_BREDR) {
- struct hci_dev *hdev;
- hdev = hci_dev_get(req->dest_amp_id);
- if (!hdev || hdev->dev_type != HCI_AMP ||
- !test_bit(HCI_UP, &hdev->flags)) {
- if (hdev)
- hci_dev_put(hdev);
-
- result = L2CAP_MR_BAD_ID;
- goto send_move_response;
- }
- hci_dev_put(hdev);
- }
-
- /* Detect a move collision. Only send a collision response
- * if this side has "lost", otherwise proceed with the move.
- * The winner has the larger bd_addr.
- */
- if ((__chan_is_moving(chan) ||
- chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
- bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
- result = L2CAP_MR_COLLISION;
- goto send_move_response;
- }
-
- chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
- l2cap_move_setup(chan);
- chan->move_id = req->dest_amp_id;
-
- if (req->dest_amp_id == AMP_ID_BREDR) {
- /* Moving to BR/EDR */
- if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
- chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
- result = L2CAP_MR_PEND;
- } else {
- chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
- result = L2CAP_MR_SUCCESS;
- }
- } else {
- chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
- /* Placeholder - uncomment when amp functions are available */
- /*amp_accept_physical(chan, req->dest_amp_id);*/
- result = L2CAP_MR_PEND;
- }
-
-send_move_response:
- l2cap_send_move_chan_rsp(chan, result);
-
- l2cap_chan_unlock(chan);
- l2cap_chan_put(chan);
-
- return 0;
-}
-
-static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
-{
- struct l2cap_chan *chan;
- struct hci_chan *hchan = NULL;
-
- chan = l2cap_get_chan_by_scid(conn, icid);
- if (!chan) {
- l2cap_send_move_chan_cfm_icid(conn, icid);
- return;
- }
-
- __clear_chan_timer(chan);
- if (result == L2CAP_MR_PEND)
- __set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
-
- switch (chan->move_state) {
- case L2CAP_MOVE_WAIT_LOGICAL_COMP:
- /* Move confirm will be sent when logical link
- * is complete.
- */
- chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
- break;
- case L2CAP_MOVE_WAIT_RSP_SUCCESS:
- if (result == L2CAP_MR_PEND) {
- break;
- } else if (test_bit(CONN_LOCAL_BUSY,
- &chan->conn_state)) {
- chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
- } else {
- /* Logical link is up or moving to BR/EDR,
- * proceed with move
- */
- chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
- l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
- }
- break;
- case L2CAP_MOVE_WAIT_RSP:
- /* Moving to AMP */
- if (result == L2CAP_MR_SUCCESS) {
- /* Remote is ready, send confirm immediately
- * after logical link is ready
- */
- chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
- } else {
- /* Both logical link and move success
- * are required to confirm
- */
- chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
- }
-
- /* Placeholder - get hci_chan for logical link */
- if (!hchan) {
- /* Logical link not available */
- l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
- break;
- }
-
- /* If the logical link is not yet connected, do not
- * send confirmation.
- */
- if (hchan->state != BT_CONNECTED)
- break;
-
- /* Logical link is already ready to go */
-
- chan->hs_hcon = hchan->conn;
- chan->hs_hcon->l2cap_data = chan->conn;
-
- if (result == L2CAP_MR_SUCCESS) {
- /* Can confirm now */
- l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
- } else {
- /* Now only need move success
- * to confirm
- */
- chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
- }
-
- l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
- break;
- default:
- /* Any other amp move state means the move failed. */
- chan->move_id = chan->local_amp_id;
- l2cap_move_done(chan);
- l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
- }
-
- l2cap_chan_unlock(chan);
- l2cap_chan_put(chan);
-}
-
-static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
- u16 result)
-{
- struct l2cap_chan *chan;
-
- chan = l2cap_get_chan_by_ident(conn, ident);
- if (!chan) {
- /* Could not locate channel, icid is best guess */
- l2cap_send_move_chan_cfm_icid(conn, icid);
- return;
- }
-
- __clear_chan_timer(chan);
-
- if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
- if (result == L2CAP_MR_COLLISION) {
- chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
- } else {
- /* Cleanup - cancel move */
- chan->move_id = chan->local_amp_id;
- l2cap_move_done(chan);
- }
- }
-
- l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
-
- l2cap_chan_unlock(chan);
- l2cap_chan_put(chan);
-}
-
-static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
- struct l2cap_cmd_hdr *cmd,
- u16 cmd_len, void *data)
-{
- struct l2cap_move_chan_rsp *rsp = data;
- u16 icid, result;
-
- if (cmd_len != sizeof(*rsp))
- return -EPROTO;
-
- icid = le16_to_cpu(rsp->icid);
- result = le16_to_cpu(rsp->result);
-
- BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
-
- if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
- l2cap_move_continue(conn, icid, result);
- else
- l2cap_move_fail(conn, cmd->ident, icid, result);
-
- return 0;
-}
-
-static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
- struct l2cap_cmd_hdr *cmd,
- u16 cmd_len, void *data)
-{
- struct l2cap_move_chan_cfm *cfm = data;
- struct l2cap_chan *chan;
- u16 icid, result;
-
- if (cmd_len != sizeof(*cfm))
- return -EPROTO;
-
- icid = le16_to_cpu(cfm->icid);
- result = le16_to_cpu(cfm->result);
-
- BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
-
- chan = l2cap_get_chan_by_dcid(conn, icid);
- if (!chan) {
- /* Spec requires a response even if the icid was not found */
- l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
- return 0;
- }
-
- if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
- if (result == L2CAP_MC_CONFIRMED) {
- chan->local_amp_id = chan->move_id;
- if (chan->local_amp_id == AMP_ID_BREDR)
- __release_logical_link(chan);
- } else {
- chan->move_id = chan->local_amp_id;
- }
-
- l2cap_move_done(chan);
- }
-
- l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
-
- l2cap_chan_unlock(chan);
- l2cap_chan_put(chan);
-
- return 0;
-}
-
-static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
- struct l2cap_cmd_hdr *cmd,
- u16 cmd_len, void *data)
-{
- struct l2cap_move_chan_cfm_rsp *rsp = data;
- struct l2cap_chan *chan;
- u16 icid;
-
- if (cmd_len != sizeof(*rsp))
- return -EPROTO;
-
- icid = le16_to_cpu(rsp->icid);
-
- BT_DBG("icid 0x%4.4x", icid);
-
- chan = l2cap_get_chan_by_scid(conn, icid);
- if (!chan)
- return 0;
-
- __clear_chan_timer(chan);
-
- if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
- chan->local_amp_id = chan->move_id;
-
- if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
- __release_logical_link(chan);
-
- l2cap_move_done(chan);
- }
-
- l2cap_chan_unlock(chan);
- l2cap_chan_put(chan);
-
- return 0;
-}
-
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
struct l2cap_cmd_hdr *cmd,
u16 cmd_len, u8 *data)
@@ -5745,7 +4761,6 @@ static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
break;
case L2CAP_CONN_RSP:
- case L2CAP_CREATE_CHAN_RSP:
l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
break;
@@ -5780,26 +4795,6 @@ static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
l2cap_information_rsp(conn, cmd, cmd_len, data);
break;
- case L2CAP_CREATE_CHAN_REQ:
- err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
- break;
-
- case L2CAP_MOVE_CHAN_REQ:
- err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
- break;
-
- case L2CAP_MOVE_CHAN_RSP:
- l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
- break;
-
- case L2CAP_MOVE_CHAN_CFM:
- err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
- break;
-
- case L2CAP_MOVE_CHAN_CFM_RSP:
- l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
- break;
-
default:
BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
err = -EINVAL;
@@ -7051,8 +6046,8 @@ static int l2cap_rx_state_recv(struct l2cap_chan *chan,
if (control->final) {
clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
- if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
- !__chan_is_moving(chan)) {
+ if (!test_and_clear_bit(CONN_REJ_ACT,
+ &chan->conn_state)) {
control->final = 0;
l2cap_retransmit_all(chan, control);
}
@@ -7245,11 +6240,7 @@ static int l2cap_finish_move(struct l2cap_chan *chan)
BT_DBG("chan %p", chan);
chan->rx_state = L2CAP_RX_STATE_RECV;
-
- if (chan->hs_hcon)
- chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
- else
- chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
+ chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
return l2cap_resegment(chan);
}
@@ -7316,11 +6307,7 @@ static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
*/
chan->next_tx_seq = control->reqseq;
chan->unacked_frames = 0;
-
- if (chan->hs_hcon)
- chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
- else
- chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
+ chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
err = l2cap_resegment(chan);
@@ -7672,21 +6659,10 @@ static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
chan = l2cap_get_chan_by_scid(conn, cid);
if (!chan) {
- if (cid == L2CAP_CID_A2MP) {
- chan = a2mp_channel_create(conn, skb);
- if (!chan) {
- kfree_skb(skb);
- return;
- }
-
- l2cap_chan_hold(chan);
- l2cap_chan_lock(chan);
- } else {
- BT_DBG("unknown cid 0x%4.4x", cid);
- /* Drop packet and return */
- kfree_skb(skb);
- return;
- }
+ BT_DBG("unknown cid 0x%4.4x", cid);
+ /* Drop packet and return */
+ kfree_skb(skb);
+ return;
}
BT_DBG("chan %p, len %d", chan, skb->len);
@@ -7887,10 +6863,6 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;
- if (hcon->type == ACL_LINK &&
- hci_dev_test_flag(hcon->hdev, HCI_HS_ENABLED))
- conn->local_fixed_chan |= L2CAP_FC_A2MP;
-
if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
(bredr_sc_enabled(hcon->hdev) ||
hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
@@ -7953,7 +6925,7 @@ static void l2cap_chan_by_pid(struct l2cap_chan *chan, void *data)
}
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
- bdaddr_t *dst, u8 dst_type)
+ bdaddr_t *dst, u8 dst_type, u16 timeout)
{
struct l2cap_conn *conn;
struct hci_conn *hcon;
@@ -8046,19 +7018,17 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
hcon = hci_connect_le(hdev, dst, dst_type, false,
- chan->sec_level,
- HCI_LE_CONN_TIMEOUT,
+ chan->sec_level, timeout,
HCI_ROLE_SLAVE);
else
hcon = hci_connect_le_scan(hdev, dst, dst_type,
- chan->sec_level,
- HCI_LE_CONN_TIMEOUT,
+ chan->sec_level, timeout,
CONN_REASON_L2CAP_CHAN);
} else {
u8 auth_type = l2cap_get_auth_type(chan);
hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type,
- CONN_REASON_L2CAP_CHAN);
+ CONN_REASON_L2CAP_CHAN, timeout);
}
if (IS_ERR(hcon)) {
@@ -8355,11 +7325,6 @@ static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
state_to_string(chan->state));
- if (chan->scid == L2CAP_CID_A2MP) {
- l2cap_chan_unlock(chan);
- continue;
- }
-
if (!status && encrypt)
chan->sec_level = hcon->sec_level;
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index e50d3d102078..4287aa6cc988 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -254,7 +254,8 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr,
chan->mode = L2CAP_MODE_LE_FLOWCTL;
err = l2cap_chan_connect(chan, la.l2_psm, __le16_to_cpu(la.l2_cid),
- &la.l2_bdaddr, la.l2_bdaddr_type);
+ &la.l2_bdaddr, la.l2_bdaddr_type,
+ sk->sk_sndtimeo);
if (err)
return err;
@@ -1027,23 +1028,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
break;
}
- if (opt > BT_CHANNEL_POLICY_AMP_PREFERRED) {
- err = -EINVAL;
- break;
- }
-
- if (chan->mode != L2CAP_MODE_ERTM &&
- chan->mode != L2CAP_MODE_STREAMING) {
- err = -EOPNOTSUPP;
- break;
- }
-
- chan->chan_policy = (u8) opt;
-
- if (sk->sk_state == BT_CONNECTED &&
- chan->move_role == L2CAP_MOVE_ROLE_NONE)
- l2cap_move_start(chan);
-
+ err = -EOPNOTSUPP;
break;
case BT_SNDMTU:
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index ee3b4aad8bd8..32ed6e9245a3 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -835,8 +835,6 @@ static u32 get_supported_settings(struct hci_dev *hdev)
if (lmp_ssp_capable(hdev)) {
settings |= MGMT_SETTING_SSP;
- if (IS_ENABLED(CONFIG_BT_HS))
- settings |= MGMT_SETTING_HS;
}
if (lmp_sc_capable(hdev))
@@ -901,9 +899,6 @@ static u32 get_current_settings(struct hci_dev *hdev)
if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
settings |= MGMT_SETTING_SSP;
- if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
- settings |= MGMT_SETTING_HS;
-
if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
settings |= MGMT_SETTING_ADVERTISING;
@@ -1390,6 +1385,14 @@ static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
hci_dev_lock(hdev);
+ if (!cp->val) {
+ if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN)) {
+ err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
+ MGMT_STATUS_BUSY);
+ goto failed;
+ }
+ }
+
if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
MGMT_STATUS_BUSY);
@@ -1409,7 +1412,7 @@ static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
/* Cancel potentially blocking sync operation before power off */
if (cp->val == 0x00) {
- __hci_cmd_sync_cancel(hdev, -EHOSTDOWN);
+ hci_cmd_sync_cancel_sync(hdev, -EHOSTDOWN);
err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
mgmt_set_powered_complete);
} else {
@@ -1704,8 +1707,7 @@ static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
new_settings(hdev, cmd->sk);
done:
- if (cmd)
- mgmt_pending_remove(cmd);
+ mgmt_pending_remove(cmd);
hci_dev_unlock(hdev);
}
@@ -1930,7 +1932,6 @@ static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
if (enable && hci_dev_test_and_clear_flag(hdev,
HCI_SSP_ENABLED)) {
- hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
new_settings(hdev, NULL);
}
@@ -1943,12 +1944,6 @@ static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
} else {
changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
-
- if (!changed)
- changed = hci_dev_test_and_clear_flag(hdev,
- HCI_HS_ENABLED);
- else
- hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
}
mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
@@ -2012,11 +2007,6 @@ static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
} else {
changed = hci_dev_test_and_clear_flag(hdev,
HCI_SSP_ENABLED);
- if (!changed)
- changed = hci_dev_test_and_clear_flag(hdev,
- HCI_HS_ENABLED);
- else
- hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
}
err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
@@ -2062,63 +2052,10 @@ failed:
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
- struct mgmt_mode *cp = data;
- bool changed;
- u8 status;
- int err;
-
bt_dev_dbg(hdev, "sock %p", sk);
- if (!IS_ENABLED(CONFIG_BT_HS))
- return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
MGMT_STATUS_NOT_SUPPORTED);
-
- status = mgmt_bredr_support(hdev);
- if (status)
- return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
-
- if (!lmp_ssp_capable(hdev))
- return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
- MGMT_STATUS_NOT_SUPPORTED);
-
- if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
- return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
- MGMT_STATUS_REJECTED);
-
- if (cp->val != 0x00 && cp->val != 0x01)
- return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
- MGMT_STATUS_INVALID_PARAMS);
-
- hci_dev_lock(hdev);
-
- if (pending_find(MGMT_OP_SET_SSP, hdev)) {
- err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
- MGMT_STATUS_BUSY);
- goto unlock;
- }
-
- if (cp->val) {
- changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
- } else {
- if (hdev_is_powered(hdev)) {
- err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
- MGMT_STATUS_REJECTED);
- goto unlock;
- }
-
- changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
- }
-
- err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
- if (err < 0)
- goto unlock;
-
- if (changed)
- err = new_settings(hdev, sk);
-
-unlock:
- hci_dev_unlock(hdev);
- return err;
}
static void set_le_complete(struct hci_dev *hdev, void *data, int err)
@@ -3188,6 +3125,7 @@ failed:
static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
{
switch (link_type) {
+ case ISO_LINK:
case LE_LINK:
switch (addr_type) {
case ADDR_LE_DEV_PUBLIC:
@@ -3505,7 +3443,8 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
if (cp->addr.type == BDADDR_BREDR) {
conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
- auth_type, CONN_REASON_PAIR_DEVICE);
+ auth_type, CONN_REASON_PAIR_DEVICE,
+ HCI_ACL_CONN_TIMEOUT);
} else {
u8 addr_type = le_addr_type(cp->addr.type);
struct hci_conn_params *p;
@@ -6766,7 +6705,6 @@ static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
- hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
}
hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
@@ -8470,7 +8408,7 @@ static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
static u8 calculate_name_len(struct hci_dev *hdev)
{
- u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
+ u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 2]; /* len + type + name */
return eir_append_local_name(hdev, buf, 0);
}
@@ -8829,8 +8767,7 @@ static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
}
unlock:
- if (cmd)
- mgmt_pending_free(cmd);
+ mgmt_pending_free(cmd);
hci_dev_unlock(hdev);
}
@@ -9681,6 +9618,9 @@ void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
u16 eir_len = 0;
u32 flags = 0;
+ if (test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
+ return;
+
/* allocate buff for LE or BR/EDR adv */
if (conn->le_adv_data_len > 0)
skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
@@ -9748,6 +9688,9 @@ bool mgmt_powering_down(struct hci_dev *hdev)
struct mgmt_pending_cmd *cmd;
struct mgmt_mode *cp;
+ if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN))
+ return true;
+
cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
if (!cmd)
return false;
@@ -9766,14 +9709,6 @@ void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
struct mgmt_ev_device_disconnected ev;
struct sock *sk = NULL;
- /* The connection is still in hci_conn_hash so test for 1
- * instead of 0 to know if this is the last one.
- */
- if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
- cancel_delayed_work(&hdev->power_off);
- queue_work(hdev->req_workqueue, &hdev->power_off.work);
- }
-
if (!mgmt_connected)
return;
@@ -9830,14 +9765,6 @@ void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
{
struct mgmt_ev_connect_failed ev;
- /* The connection is still in hci_conn_hash so test for 1
- * instead of 0 to know if this is the last one.
- */
- if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
- cancel_delayed_work(&hdev->power_off);
- queue_work(hdev->req_workqueue, &hdev->power_off.work);
- }
-
bacpy(&ev.addr.bdaddr, bdaddr);
ev.addr.type = link_to_bdaddr(link_type, addr_type);
ev.status = mgmt_status(status);
@@ -10071,6 +9998,9 @@ void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
/* If this is a HCI command related to powering on the
* HCI dev don't send any mgmt signals.
*/
+ if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN))
+ return;
+
if (pending_find(MGMT_OP_SET_POWERED, hdev))
return;
}
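
mgmt_device_connected() now bails out unless it wins an atomic test-and-set on HCI_CONN_MGMT_CONNECTED, so the event is emitted at most once however many paths race to report the connection. A userspace analogue using C11 atomic_flag; the struct and function names are illustrative:

    #include <stdatomic.h>
    #include <stdio.h>

    struct conn {
        atomic_flag mgmt_connected;
    };

    /* The first caller wins and emits the event; later callers find the
     * flag already set and return without a duplicate notification. */
    static void report_connected(struct conn *c)
    {
        if (atomic_flag_test_and_set(&c->mgmt_connected))
            return;
        puts("device connected event sent");
    }

    int main(void)
    {
        struct conn c = { ATOMIC_FLAG_INIT };

        report_connected(&c);  /* prints once */
        report_connected(&c);  /* silently skipped */
        return 0;
    }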
diff --git a/net/bluetooth/msft.c b/net/bluetooth/msft.c
index 630e3023273b..9612c5d1b13f 100644
--- a/net/bluetooth/msft.c
+++ b/net/bluetooth/msft.c
@@ -875,6 +875,7 @@ static int msft_add_address_filter_sync(struct hci_dev *hdev, void *data)
remove = true;
goto done;
}
+
cp->sub_opcode = MSFT_OP_LE_MONITOR_ADVERTISEMENT;
cp->rssi_high = address_filter->rssi_high;
cp->rssi_low = address_filter->rssi_low;
@@ -887,6 +888,8 @@ static int msft_add_address_filter_sync(struct hci_dev *hdev, void *data)
skb = __hci_cmd_sync(hdev, hdev->msft_opcode, size, cp,
HCI_CMD_TIMEOUT);
+ kfree(cp);
+
if (IS_ERR(skb)) {
bt_dev_err(hdev, "Failed to enable address %pMR filter",
&address_filter->bdaddr);
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index c736186aba26..43daf965a01e 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -264,7 +264,8 @@ static int sco_connect(struct sock *sk)
}
hcon = hci_connect_sco(hdev, type, &sco_pi(sk)->dst,
- sco_pi(sk)->setting, &sco_pi(sk)->codec);
+ sco_pi(sk)->setting, &sco_pi(sk)->codec,
+ sk->sk_sndtimeo);
if (IS_ERR(hcon)) {
err = PTR_ERR(hcon);
goto unlock;
diff --git a/net/bpf/bpf_dummy_struct_ops.c b/net/bpf/bpf_dummy_struct_ops.c
index 8906f7bdf4a9..de33dc1b0daa 100644
--- a/net/bpf/bpf_dummy_struct_ops.c
+++ b/net/bpf/bpf_dummy_struct_ops.c
@@ -7,7 +7,7 @@
#include <linux/bpf.h>
#include <linux/btf.h>
-extern struct bpf_struct_ops bpf_bpf_dummy_ops;
+static struct bpf_struct_ops bpf_bpf_dummy_ops;
/* A common type for test_N with return value in bpf_dummy_ops */
typedef int (*dummy_ops_test_ret_fn)(struct bpf_dummy_ops_state *state, ...);
@@ -22,6 +22,8 @@ struct bpf_dummy_ops_test_args {
struct bpf_dummy_ops_state state;
};
+static struct btf *bpf_dummy_ops_btf;
+
static struct bpf_dummy_ops_test_args *
dummy_ops_init_args(const union bpf_attr *kattr, unsigned int nr)
{
@@ -89,10 +91,17 @@ int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
struct bpf_tramp_link *link = NULL;
void *image = NULL;
unsigned int op_idx;
+ u32 image_off = 0;
int prog_ret;
+ s32 type_id;
int err;
- if (prog->aux->attach_btf_id != st_ops->type_id)
+ type_id = btf_find_by_name_kind(bpf_dummy_ops_btf,
+ bpf_bpf_dummy_ops.name,
+ BTF_KIND_STRUCT);
+ if (type_id < 0)
+ return -EINVAL;
+ if (prog->aux->attach_btf_id != type_id)
return -EOPNOTSUPP;
func_proto = prog->aux->attach_func_proto;
@@ -106,12 +115,6 @@ int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
goto out;
}
- image = arch_alloc_bpf_trampoline(PAGE_SIZE);
- if (!image) {
- err = -ENOMEM;
- goto out;
- }
-
link = kzalloc(sizeof(*link), GFP_USER);
if (!link) {
err = -ENOMEM;
@@ -125,7 +128,8 @@ int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
err = bpf_struct_ops_prepare_trampoline(tlinks, link,
&st_ops->func_models[op_idx],
&dummy_ops_test_ret_function,
- image, image + PAGE_SIZE);
+ &image, &image_off,
+ true);
if (err < 0)
goto out;
@@ -139,7 +143,7 @@ int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
err = -EFAULT;
out:
kfree(args);
- arch_free_bpf_trampoline(image, PAGE_SIZE);
+ bpf_struct_ops_image_free(image);
if (link)
bpf_link_put(&link->link);
kfree(tlinks);
@@ -148,6 +152,7 @@ out:
static int bpf_dummy_init(struct btf *btf)
{
+ bpf_dummy_ops_btf = btf;
return 0;
}
@@ -169,7 +174,7 @@ static int bpf_dummy_ops_check_member(const struct btf_type *t,
case offsetof(struct bpf_dummy_ops, test_sleepable):
break;
default:
- if (prog->aux->sleepable)
+ if (prog->sleepable)
return -EINVAL;
}
@@ -247,7 +252,7 @@ static struct bpf_dummy_ops __bpf_bpf_dummy_ops = {
.test_sleepable = bpf_dummy_test_sleepable,
};
-struct bpf_struct_ops bpf_bpf_dummy_ops = {
+static struct bpf_struct_ops bpf_bpf_dummy_ops = {
.verifier_ops = &bpf_dummy_verifier_ops,
.init = bpf_dummy_init,
.check_member = bpf_dummy_ops_check_member,
@@ -256,4 +261,11 @@ struct bpf_struct_ops bpf_bpf_dummy_ops = {
.unreg = bpf_dummy_unreg,
.name = "bpf_dummy_ops",
.cfi_stubs = &__bpf_bpf_dummy_ops,
+ .owner = THIS_MODULE,
};
+
+static int __init bpf_dummy_struct_ops_init(void)
+{
+ return register_bpf_struct_ops(&bpf_bpf_dummy_ops, bpf_dummy_ops);
+}
+late_initcall(bpf_dummy_struct_ops_init);
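
With bpf_bpf_dummy_ops now static, nothing outside this file can reference it; instead the struct registers itself at boot through the new late_initcall(). A loose userspace analogue of that self-registration pattern, substituting the GCC/Clang constructor attribute for an initcall (all names here are illustrative):

    #include <stdio.h>

    struct ops {
        const char *name;
        int (*init)(void);
        struct ops *next;
    };

    static struct ops *ops_list;

    static void register_ops(struct ops *o)
    {
        o->next = ops_list;
        ops_list = o;
    }

    static int dummy_init(void) { return 0; }

    static struct ops dummy_ops = {
        .name = "bpf_dummy_ops",
        .init = dummy_init,
    };

    /* Runs before main(), loosely analogous to a late_initcall(). */
    __attribute__((constructor))
    static void dummy_ops_register(void)
    {
        register_ops(&dummy_ops);
    }

    int main(void)
    {
        struct ops *o;

        for (o = ops_list; o; o = o->next)
            printf("registered: %s\n", o->name);
        return 0;
    }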
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index dfd919374017..61efeadaff8d 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -12,6 +12,7 @@
#include <linux/rcupdate_trace.h>
#include <linux/sched/signal.h>
#include <net/bpf_sk_storage.h>
+#include <net/hotdata.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <net/net_namespace.h>
@@ -254,7 +255,8 @@ static int xdp_recv_frames(struct xdp_frame **frames, int nframes,
int i, n;
LIST_HEAD(list);
- n = kmem_cache_alloc_bulk(skbuff_cache, gfp, nframes, (void **)skbs);
+ n = kmem_cache_alloc_bulk(net_hotdata.skbuff_cache, gfp, nframes,
+ (void **)skbs);
if (unlikely(n == 0)) {
for (i = 0; i < nframes; i++)
xdp_return_frame(frames[i]);
@@ -617,21 +619,21 @@ CFI_NOSEAL(bpf_kfunc_call_memb_release_dtor);
__bpf_kfunc_end_defs();
-BTF_SET8_START(bpf_test_modify_return_ids)
+BTF_KFUNCS_START(bpf_test_modify_return_ids)
BTF_ID_FLAGS(func, bpf_modify_return_test)
BTF_ID_FLAGS(func, bpf_modify_return_test2)
BTF_ID_FLAGS(func, bpf_fentry_test1, KF_SLEEPABLE)
-BTF_SET8_END(bpf_test_modify_return_ids)
+BTF_KFUNCS_END(bpf_test_modify_return_ids)
static const struct btf_kfunc_id_set bpf_test_modify_return_set = {
.owner = THIS_MODULE,
.set = &bpf_test_modify_return_ids,
};
-BTF_SET8_START(test_sk_check_kfunc_ids)
+BTF_KFUNCS_START(test_sk_check_kfunc_ids)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_kfunc_call_memb_release, KF_RELEASE)
-BTF_SET8_END(test_sk_check_kfunc_ids)
+BTF_KFUNCS_END(test_sk_check_kfunc_ids)
static void *bpf_test_init(const union bpf_attr *kattr, u32 user_size,
u32 size, u32 headroom, u32 tailroom)
diff --git a/net/bridge/br.c b/net/bridge/br.c
index ac19b797dbec..2cab878e0a39 100644
--- a/net/bridge/br.c
+++ b/net/bridge/br.c
@@ -356,26 +356,21 @@ void br_opt_toggle(struct net_bridge *br, enum net_bridge_opts opt, bool on)
clear_bit(opt, &br->options);
}
-static void __net_exit br_net_exit_batch(struct list_head *net_list)
+static void __net_exit br_net_exit_batch_rtnl(struct list_head *net_list,
+ struct list_head *dev_to_kill)
{
struct net_device *dev;
struct net *net;
- LIST_HEAD(list);
-
- rtnl_lock();
+ ASSERT_RTNL();
list_for_each_entry(net, net_list, exit_list)
for_each_netdev(net, dev)
if (netif_is_bridge_master(dev))
- br_dev_delete(dev, &list);
-
- unregister_netdevice_many(&list);
-
- rtnl_unlock();
+ br_dev_delete(dev, dev_to_kill);
}
static struct pernet_operations br_net_ops = {
- .exit_batch = br_net_exit_batch,
+ .exit_batch_rtnl = br_net_exit_batch_rtnl,
};
static const struct stp_proto br_stp_proto = {
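
The br.c hunk above drops the bridge's private rtnl_lock()/unregister/rtnl_unlock() cycle: the new exit_batch_rtnl handler only appends victims to dev_to_kill while the core holds RTNL once for the entire batch. A rough userspace model of why that batching helps: one lock round-trip amortized over N objects (pthread-based, names illustrative):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct dev {
        int id;
        struct dev *next;
    };

    static pthread_mutex_t registry_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct dev *registry;

    /* Detach the whole list under a single lock acquisition, then do
     * the (potentially slow) destruction after the lock is dropped. */
    static void batch_destroy(void)
    {
        struct dev *kill_list, *d;

        pthread_mutex_lock(&registry_lock);
        kill_list = registry;
        registry = NULL;
        pthread_mutex_unlock(&registry_lock);

        while ((d = kill_list)) {
            kill_list = d->next;
            printf("destroying dev %d\n", d->id);
            free(d);
        }
    }

    int main(void)
    {
        for (int i = 0; i < 3; i++) {
            struct dev *d = malloc(sizeof(*d));

            d->id = i;
            pthread_mutex_lock(&registry_lock);
            d->next = registry;
            registry = d;
            pthread_mutex_unlock(&registry_lock);
        }
        batch_destroy();
        return 0;
    }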
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 65cee0ad3c1b..c366ccc8b3db 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -108,38 +108,23 @@ out:
return NETDEV_TX_OK;
}
-static struct lock_class_key bridge_netdev_addr_lock_key;
-
-static void br_set_lockdep_class(struct net_device *dev)
-{
- lockdep_set_class(&dev->addr_list_lock, &bridge_netdev_addr_lock_key);
-}
-
static int br_dev_init(struct net_device *dev)
{
struct net_bridge *br = netdev_priv(dev);
int err;
- dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
- if (!dev->tstats)
- return -ENOMEM;
-
err = br_fdb_hash_init(br);
- if (err) {
- free_percpu(dev->tstats);
+ if (err)
return err;
- }
err = br_mdb_hash_init(br);
if (err) {
- free_percpu(dev->tstats);
br_fdb_hash_fini(br);
return err;
}
err = br_vlan_init(br);
if (err) {
- free_percpu(dev->tstats);
br_mdb_hash_fini(br);
br_fdb_hash_fini(br);
return err;
@@ -147,14 +132,14 @@ static int br_dev_init(struct net_device *dev)
err = br_multicast_init_stats(br);
if (err) {
- free_percpu(dev->tstats);
br_vlan_flush(br);
br_mdb_hash_fini(br);
br_fdb_hash_fini(br);
+ return err;
}
- br_set_lockdep_class(dev);
- return err;
+ netdev_lockdep_set_classes(dev);
+ return 0;
}
static void br_dev_uninit(struct net_device *dev)
@@ -166,7 +151,6 @@ static void br_dev_uninit(struct net_device *dev)
br_vlan_flush(br);
br_mdb_hash_fini(br);
br_fdb_hash_fini(br);
- free_percpu(dev->tstats);
}
static int br_dev_open(struct net_device *dev)
@@ -481,7 +465,7 @@ static const struct net_device_ops br_netdev_ops = {
.ndo_fill_forward_path = br_fill_forward_path,
};
-static struct device_type br_type = {
+static const struct device_type br_type = {
.name = "bridge",
};
@@ -503,6 +487,7 @@ void br_dev_setup(struct net_device *dev)
dev->hw_features = COMMON_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_STAG_TX;
dev->vlan_features = COMMON_FEATURES;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
br->dev = dev;
spin_lock_init(&br->lock);
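Setting dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS in the setup routine hands the tstats lifetime to the core: registration allocates the per-CPU counters, free_netdev() releases them, and dev_get_stats() falls back to dev_get_tstats64() (see the dev.c hunk later in this series). That is why every free_percpu(dev->tstats) call and the manual error unwinding around it can be deleted above. A hedged driver-side sketch (my_dev_setup/my_dev_rx are hypothetical):

	static void my_dev_setup(struct net_device *dev)
	{
		/* core now owns allocation and free of dev->tstats */
		dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
	}

	static void my_dev_rx(struct net_device *dev, struct sk_buff *skb)
	{
		/* per-packet accounting is unchanged */
		dev_sw_netstats_rx_add(dev, skb->len);
	}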
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index c622de5eccd0..c77591e63841 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -35,10 +35,7 @@ static struct kmem_cache *br_fdb_cache __read_mostly;
int __init br_fdb_init(void)
{
- br_fdb_cache = kmem_cache_create("bridge_fdb_cache",
- sizeof(struct net_bridge_fdb_entry),
- 0,
- SLAB_HWCACHE_ALIGN, NULL);
+ br_fdb_cache = KMEM_CACHE(net_bridge_fdb_entry, SLAB_HWCACHE_ALIGN);
if (!br_fdb_cache)
return -ENOMEM;
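KMEM_CACHE() is shorthand that derives the cache name, size and alignment from the struct type, so the open-coded kmem_cache_create() call above collapses to one line. Roughly, simplified from include/linux/slab.h:

	#define KMEM_CACHE(__struct, __flags)					\
		kmem_cache_create(#__struct, sizeof(struct __struct),		\
				  __alignof__(struct __struct), (__flags), NULL)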
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 5ad4abfcb7ba..2cf4fc756263 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -455,7 +455,8 @@ static int br_fill_ifinfo(struct sk_buff *skb,
u32 filter_mask, const struct net_device *dev,
bool getlink)
{
- u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;
+ u8 operstate = netif_running(dev) ? READ_ONCE(dev->operstate) :
+ IF_OPER_DOWN;
struct nlattr *af = NULL;
struct net_bridge *br;
struct ifinfomsg *hdr;
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index 15f44d026e75..9c2fffb827ab 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -841,7 +841,7 @@ void br_vlan_flush(struct net_bridge *br)
vg = br_vlan_group(br);
__vlan_flush(br, NULL, vg);
RCU_INIT_POINTER(br->vlgrp, NULL);
- synchronize_rcu();
+ synchronize_net();
__vlan_group_free(vg);
}
@@ -1372,7 +1372,7 @@ void nbp_vlan_flush(struct net_bridge_port *port)
vg = nbp_vlan_group(port);
__vlan_flush(port->br, port, vg);
RCU_INIT_POINTER(port->vlgrp, NULL);
- synchronize_rcu();
+ synchronize_net();
__vlan_group_free(vg);
}
diff --git a/net/bridge/netfilter/Kconfig b/net/bridge/netfilter/Kconfig
index 7f304a19ac1b..104c0125e32e 100644
--- a/net/bridge/netfilter/Kconfig
+++ b/net/bridge/netfilter/Kconfig
@@ -39,6 +39,10 @@ config NF_CONNTRACK_BRIDGE
To compile it as a module, choose M here. If unsure, say N.
+# old sockopt interface and eval loop
+config BRIDGE_NF_EBTABLES_LEGACY
+ tristate
+
menuconfig BRIDGE_NF_EBTABLES
tristate "Ethernet Bridge tables (ebtables) support"
depends on BRIDGE && NETFILTER && NETFILTER_XTABLES
@@ -55,6 +59,7 @@ if BRIDGE_NF_EBTABLES
#
config BRIDGE_EBT_BROUTE
tristate "ebt: broute table support"
+ select BRIDGE_NF_EBTABLES_LEGACY
help
The ebtables broute table is used to define rules that decide between
bridging and routing frames, giving Linux the functionality of a
@@ -65,6 +70,7 @@ config BRIDGE_EBT_BROUTE
config BRIDGE_EBT_T_FILTER
tristate "ebt: filter table support"
+ select BRIDGE_NF_EBTABLES_LEGACY
help
The ebtables filter table is used to define frame filtering rules at
local input, forwarding and local output. See the man page for
@@ -74,6 +80,7 @@ config BRIDGE_EBT_T_FILTER
config BRIDGE_EBT_T_NAT
tristate "ebt: nat table support"
+ select BRIDGE_NF_EBTABLES_LEGACY
help
The ebtables nat table is used to define rules that alter the MAC
source address (MAC SNAT) or the MAC destination address (MAC DNAT).
diff --git a/net/bridge/netfilter/Makefile b/net/bridge/netfilter/Makefile
index 1c9ce49ab651..b9a1303da977 100644
--- a/net/bridge/netfilter/Makefile
+++ b/net/bridge/netfilter/Makefile
@@ -9,7 +9,7 @@ obj-$(CONFIG_NFT_BRIDGE_REJECT) += nft_reject_bridge.o
# connection tracking
obj-$(CONFIG_NF_CONNTRACK_BRIDGE) += nf_conntrack_bridge.o
-obj-$(CONFIG_BRIDGE_NF_EBTABLES) += ebtables.o
+obj-$(CONFIG_BRIDGE_NF_EBTABLES_LEGACY) += ebtables.o
# tables
obj-$(CONFIG_BRIDGE_EBT_BROUTE) += ebtable_broute.o
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 7343fd487dbe..707576eeeb58 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -865,6 +865,8 @@ static __init int can_init(void)
/* check for correct padding to be able to use the structs similarly */
BUILD_BUG_ON(offsetof(struct can_frame, len) !=
offsetof(struct canfd_frame, len) ||
+ offsetof(struct can_frame, len) !=
+ offsetof(struct canxl_frame, flags) ||
offsetof(struct can_frame, data) !=
offsetof(struct canfd_frame, data));
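The extra BUILD_BUG_ON arm encodes the layout assumption that the byte at offset 4 means "len" for CAN CC/FD frames and "flags" for CAN XL frames, so common code may inspect that byte before it knows the frame flavour. For reference, trimmed and paraphrased from the UAPI headers (include/uapi/linux/can.h):

	struct can_frame   { canid_t can_id; __u8 len;   /* ..., __u8 data[8]  */ };
	struct canfd_frame { canid_t can_id; __u8 len;   /* ..., __u8 data[64] */ };
	struct canxl_frame { canid_t prio;   __u8 flags; /* sdt, len, af, data[] */ };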
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 9168114fc87f..27d5fcf0eac9 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -72,9 +72,11 @@
#define BCM_TIMER_SEC_MAX (400 * 24 * 60 * 60)
/* use of last_frames[index].flags */
+#define RX_LOCAL 0x10 /* frame was created on the local host */
+#define RX_OWN 0x20 /* frame was sent via the socket it was received on */
#define RX_RECV 0x40 /* received data for this element */
#define RX_THR 0x80 /* element not been sent due to throttle feature */
-#define BCM_CAN_FLAGS_MASK 0x3F /* to clean private flags after usage */
+#define BCM_CAN_FLAGS_MASK 0x0F /* to clean private flags after usage */
/* get best masking value for can_rx_register() for a given single can_id */
#define REGMASK(id) ((id & CAN_EFF_FLAG) ? \
@@ -138,6 +140,16 @@ static LIST_HEAD(bcm_notifier_list);
static DEFINE_SPINLOCK(bcm_notifier_lock);
static struct bcm_sock *bcm_busy_notifier;
+/* Return pointer to store the extra msg flags for bcm_recvmsg().
+ * We use the space of one unsigned int beyond the 'struct sockaddr_can'
+ * in skb->cb.
+ */
+static inline unsigned int *bcm_flags(struct sk_buff *skb)
+{
+ /* return pointer after struct sockaddr_can */
+ return (unsigned int *)(&((struct sockaddr_can *)skb->cb)[1]);
+}
+
static inline struct bcm_sock *bcm_sk(const struct sock *sk)
{
return (struct bcm_sock *)sk;
@@ -325,6 +337,7 @@ static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
struct sock *sk = op->sk;
unsigned int datalen = head->nframes * op->cfsiz;
int err;
+ unsigned int *pflags;
skb = alloc_skb(sizeof(*head) + datalen, gfp_any());
if (!skb)
@@ -332,6 +345,14 @@ static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
skb_put_data(skb, head, sizeof(*head));
+ /* ensure space for sockaddr_can and msg flags */
+ sock_skb_cb_check_size(sizeof(struct sockaddr_can) +
+ sizeof(unsigned int));
+
+ /* initialize msg flags */
+ pflags = bcm_flags(skb);
+ *pflags = 0;
+
if (head->nframes) {
/* CAN frames starting here */
firstframe = (struct canfd_frame *)skb_tail_pointer(skb);
@@ -344,8 +365,14 @@ static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
* relevant for updates that are generated by the
* BCM, where nframes is 1
*/
- if (head->nframes == 1)
+ if (head->nframes == 1) {
+ if (firstframe->flags & RX_LOCAL)
+ *pflags |= MSG_DONTROUTE;
+ if (firstframe->flags & RX_OWN)
+ *pflags |= MSG_CONFIRM;
+
firstframe->flags &= BCM_CAN_FLAGS_MASK;
+ }
}
if (has_timestamp) {
@@ -360,7 +387,6 @@ static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
* containing the interface index.
*/
- sock_skb_cb_check_size(sizeof(struct sockaddr_can));
addr = (struct sockaddr_can *)skb->cb;
memset(addr, 0, sizeof(*addr));
addr->can_family = AF_CAN;
@@ -444,7 +470,7 @@ static void bcm_rx_changed(struct bcm_op *op, struct canfd_frame *data)
op->frames_filtered = op->frames_abs = 0;
/* this element is not throttled anymore */
- data->flags &= (BCM_CAN_FLAGS_MASK|RX_RECV);
+ data->flags &= ~RX_THR;
memset(&head, 0, sizeof(head));
head.opcode = RX_CHANGED;
@@ -465,13 +491,17 @@ static void bcm_rx_changed(struct bcm_op *op, struct canfd_frame *data)
*/
static void bcm_rx_update_and_send(struct bcm_op *op,
struct canfd_frame *lastdata,
- const struct canfd_frame *rxdata)
+ const struct canfd_frame *rxdata,
+ unsigned char traffic_flags)
{
memcpy(lastdata, rxdata, op->cfsiz);
/* mark as used and throttled by default */
lastdata->flags |= (RX_RECV|RX_THR);
+ /* add own/local/remote traffic flags */
+ lastdata->flags |= traffic_flags;
+
/* throttling mode inactive ? */
if (!op->kt_ival2) {
/* send RX_CHANGED to the user immediately */
@@ -508,7 +538,8 @@ rx_changed_settime:
* received data stored in op->last_frames[]
*/
static void bcm_rx_cmp_to_index(struct bcm_op *op, unsigned int index,
- const struct canfd_frame *rxdata)
+ const struct canfd_frame *rxdata,
+ unsigned char traffic_flags)
{
struct canfd_frame *cf = op->frames + op->cfsiz * index;
struct canfd_frame *lcf = op->last_frames + op->cfsiz * index;
@@ -521,7 +552,7 @@ static void bcm_rx_cmp_to_index(struct bcm_op *op, unsigned int index,
if (!(lcf->flags & RX_RECV)) {
/* received data for the first time => send update to user */
- bcm_rx_update_and_send(op, lcf, rxdata);
+ bcm_rx_update_and_send(op, lcf, rxdata, traffic_flags);
return;
}
@@ -529,7 +560,7 @@ static void bcm_rx_cmp_to_index(struct bcm_op *op, unsigned int index,
for (i = 0; i < rxdata->len; i += 8) {
if ((get_u64(cf, i) & get_u64(rxdata, i)) !=
(get_u64(cf, i) & get_u64(lcf, i))) {
- bcm_rx_update_and_send(op, lcf, rxdata);
+ bcm_rx_update_and_send(op, lcf, rxdata, traffic_flags);
return;
}
}
@@ -537,7 +568,7 @@ static void bcm_rx_cmp_to_index(struct bcm_op *op, unsigned int index,
if (op->flags & RX_CHECK_DLC) {
/* do a real check in CAN frame length */
if (rxdata->len != lcf->len) {
- bcm_rx_update_and_send(op, lcf, rxdata);
+ bcm_rx_update_and_send(op, lcf, rxdata, traffic_flags);
return;
}
}
@@ -644,6 +675,7 @@ static void bcm_rx_handler(struct sk_buff *skb, void *data)
struct bcm_op *op = (struct bcm_op *)data;
const struct canfd_frame *rxframe = (struct canfd_frame *)skb->data;
unsigned int i;
+ unsigned char traffic_flags;
if (op->can_id != rxframe->can_id)
return;
@@ -673,15 +705,24 @@ static void bcm_rx_handler(struct sk_buff *skb, void *data)
return;
}
+ /* compute flags to distinguish between own/local/remote CAN traffic */
+ traffic_flags = 0;
+ if (skb->sk) {
+ traffic_flags |= RX_LOCAL;
+ if (skb->sk == op->sk)
+ traffic_flags |= RX_OWN;
+ }
+
if (op->flags & RX_FILTER_ID) {
/* the easiest case */
- bcm_rx_update_and_send(op, op->last_frames, rxframe);
+ bcm_rx_update_and_send(op, op->last_frames, rxframe,
+ traffic_flags);
goto rx_starttimer;
}
if (op->nframes == 1) {
/* simple compare with index 0 */
- bcm_rx_cmp_to_index(op, 0, rxframe);
+ bcm_rx_cmp_to_index(op, 0, rxframe, traffic_flags);
goto rx_starttimer;
}
@@ -698,7 +739,8 @@ static void bcm_rx_handler(struct sk_buff *skb, void *data)
if ((get_u64(op->frames, 0) & get_u64(rxframe, 0)) ==
(get_u64(op->frames, 0) &
get_u64(op->frames + op->cfsiz * i, 0))) {
- bcm_rx_cmp_to_index(op, i, rxframe);
+ bcm_rx_cmp_to_index(op, i, rxframe,
+ traffic_flags);
break;
}
}
@@ -1675,6 +1717,9 @@ static int bcm_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
}
+ /* assign the flags that have been recorded in bcm_send_to_user() */
+ msg->msg_flags |= *(bcm_flags(skb));
+
skb_free_datagram(sk, skb);
return size;
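From user space the new flags surface in recvmsg(): MSG_DONTROUTE marks a frame that originated on the local host (RX_LOCAL) and MSG_CONFIRM marks one sent via the very socket it is received on (RX_OWN). A hedged usage sketch (bcm_poll_one is hypothetical; error handling elided):

	#include <linux/can.h>
	#include <linux/can/bcm.h>
	#include <sys/socket.h>

	static void bcm_poll_one(int s)
	{
		struct {
			struct bcm_msg_head head;
			struct canfd_frame frame;
		} rx;
		struct iovec iov = { .iov_base = &rx, .iov_len = sizeof(rx) };
		struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1 };

		if (recvmsg(s, &msg, 0) > 0) {
			if (msg.msg_flags & MSG_DONTROUTE)
				;	/* frame was generated on this host */
			if (msg.msg_flags & MSG_CONFIRM)
				;	/* frame was sent by this very socket */
		}
	}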
diff --git a/net/can/isotp.c b/net/can/isotp.c
index d1c6f206f429..25bac0fafc83 100644
--- a/net/can/isotp.c
+++ b/net/can/isotp.c
@@ -381,8 +381,9 @@ static int isotp_rcv_fc(struct isotp_sock *so, struct canfd_frame *cf, int ae)
return 1;
}
- /* get communication parameters only from the first FC frame */
- if (so->tx.state == ISOTP_WAIT_FIRST_FC) {
+ /* get static/dynamic communication params from first/every FC frame */
+ if (so->tx.state == ISOTP_WAIT_FIRST_FC ||
+ so->opt.flags & CAN_ISOTP_DYN_FC_PARMS) {
so->txfc.bs = cf->data[ae + 1];
so->txfc.stmin = cf->data[ae + 2];
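CAN_ISOTP_DYN_FC_PARMS makes the stack re-read the block size and STmin from every flow-control frame instead of latching the values from the first one. A hedged user-space sketch (set before bind(); zeroed option fields keep their defaults):

	#include <linux/can/isotp.h>
	#include <sys/socket.h>

	static int isotp_enable_dyn_fc(int s)
	{
		struct can_isotp_options opts = {
			.flags = CAN_ISOTP_DYN_FC_PARMS,
		};

		return setsockopt(s, SOL_CAN_ISOTP, CAN_ISOTP_OPTS,
				  &opts, sizeof(opts));
	}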
diff --git a/net/can/raw.c b/net/can/raw.c
index e6b822624ba2..00533f64d69d 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -91,6 +91,10 @@ struct raw_sock {
int recv_own_msgs;
int fd_frames;
int xl_frames;
+ struct can_raw_vcid_options raw_vcid_opts;
+ canid_t tx_vcid_shifted;
+ canid_t rx_vcid_shifted;
+ canid_t rx_vcid_mask_shifted;
int join_filters;
int count; /* number of active filters */
struct can_filter dfilter; /* default/single filter */
@@ -134,10 +138,29 @@ static void raw_rcv(struct sk_buff *oskb, void *data)
return;
/* make sure to not pass oversized frames to the socket */
- if ((!ro->fd_frames && can_is_canfd_skb(oskb)) ||
- (!ro->xl_frames && can_is_canxl_skb(oskb)))
+ if (!ro->fd_frames && can_is_canfd_skb(oskb))
return;
+ if (can_is_canxl_skb(oskb)) {
+ struct canxl_frame *cxl = (struct canxl_frame *)oskb->data;
+
+ /* make sure to not pass oversized frames to the socket */
+ if (!ro->xl_frames)
+ return;
+
+ /* filter CAN XL VCID content */
+ if (ro->raw_vcid_opts.flags & CAN_RAW_XL_VCID_RX_FILTER) {
+ /* apply VCID filter if user enabled the filter */
+ if ((cxl->prio & ro->rx_vcid_mask_shifted) !=
+ (ro->rx_vcid_shifted & ro->rx_vcid_mask_shifted))
+ return;
+ } else {
+ /* no filter => do not forward VCID tagged frames */
+ if (cxl->prio & CANXL_VCID_MASK)
+ return;
+ }
+ }
+
/* eliminate multiple filter matches for the same skb */
if (this_cpu_ptr(ro->uniq)->skb == oskb &&
this_cpu_ptr(ro->uniq)->skbcnt == can_skb_prv(oskb)->skbcnt) {
@@ -698,6 +721,19 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
ro->fd_frames = ro->xl_frames;
break;
+ case CAN_RAW_XL_VCID_OPTS:
+ if (optlen != sizeof(ro->raw_vcid_opts))
+ return -EINVAL;
+
+ if (copy_from_sockptr(&ro->raw_vcid_opts, optval, optlen))
+ return -EFAULT;
+
+ /* prepare 32 bit values for handling in hot path */
+ ro->tx_vcid_shifted = ro->raw_vcid_opts.tx_vcid << CANXL_VCID_OFFSET;
+ ro->rx_vcid_shifted = ro->raw_vcid_opts.rx_vcid << CANXL_VCID_OFFSET;
+ ro->rx_vcid_mask_shifted = ro->raw_vcid_opts.rx_vcid_mask << CANXL_VCID_OFFSET;
+ break;
+
case CAN_RAW_JOIN_FILTERS:
if (optlen != sizeof(ro->join_filters))
return -EINVAL;
@@ -720,7 +756,6 @@ static int raw_getsockopt(struct socket *sock, int level, int optname,
struct raw_sock *ro = raw_sk(sk);
int len;
void *val;
- int err = 0;
if (level != SOL_CAN_RAW)
return -EINVAL;
@@ -730,7 +765,9 @@ static int raw_getsockopt(struct socket *sock, int level, int optname,
return -EINVAL;
switch (optname) {
- case CAN_RAW_FILTER:
+ case CAN_RAW_FILTER: {
+ int err = 0;
+
lock_sock(sk);
if (ro->count > 0) {
int fsize = ro->count * sizeof(struct can_filter);
@@ -755,7 +792,7 @@ static int raw_getsockopt(struct socket *sock, int level, int optname,
if (!err)
err = put_user(len, optlen);
return err;
-
+ }
case CAN_RAW_ERR_FILTER:
if (len > sizeof(can_err_mask_t))
len = sizeof(can_err_mask_t);
@@ -786,6 +823,25 @@ static int raw_getsockopt(struct socket *sock, int level, int optname,
val = &ro->xl_frames;
break;
+ case CAN_RAW_XL_VCID_OPTS: {
+ int err = 0;
+
+		/* user space buffer too small for VCID opts? */
+ if (len < sizeof(ro->raw_vcid_opts)) {
+ /* return -ERANGE and needed space in optlen */
+ err = -ERANGE;
+ if (put_user(sizeof(ro->raw_vcid_opts), optlen))
+ err = -EFAULT;
+ } else {
+ if (len > sizeof(ro->raw_vcid_opts))
+ len = sizeof(ro->raw_vcid_opts);
+ if (copy_to_user(optval, &ro->raw_vcid_opts, len))
+ err = -EFAULT;
+ }
+ if (!err)
+ err = put_user(len, optlen);
+ return err;
+ }
case CAN_RAW_JOIN_FILTERS:
if (len > sizeof(int))
len = sizeof(int);
@@ -803,23 +859,41 @@ static int raw_getsockopt(struct socket *sock, int level, int optname,
return 0;
}
-static bool raw_bad_txframe(struct raw_sock *ro, struct sk_buff *skb, int mtu)
+static void raw_put_canxl_vcid(struct raw_sock *ro, struct sk_buff *skb)
+{
+ struct canxl_frame *cxl = (struct canxl_frame *)skb->data;
+
+ /* sanitize non CAN XL bits */
+ cxl->prio &= (CANXL_PRIO_MASK | CANXL_VCID_MASK);
+
+ /* clear VCID in CAN XL frame if pass through is disabled */
+ if (!(ro->raw_vcid_opts.flags & CAN_RAW_XL_VCID_TX_PASS))
+ cxl->prio &= CANXL_PRIO_MASK;
+
+ /* set VCID in CAN XL frame if enabled */
+ if (ro->raw_vcid_opts.flags & CAN_RAW_XL_VCID_TX_SET) {
+ cxl->prio &= CANXL_PRIO_MASK;
+ cxl->prio |= ro->tx_vcid_shifted;
+ }
+}
+
+static unsigned int raw_check_txframe(struct raw_sock *ro, struct sk_buff *skb, int mtu)
{
/* Classical CAN -> no checks for flags and device capabilities */
if (can_is_can_skb(skb))
- return false;
+ return CAN_MTU;
/* CAN FD -> needs to be enabled and a CAN FD or CAN XL device */
if (ro->fd_frames && can_is_canfd_skb(skb) &&
(mtu == CANFD_MTU || can_is_canxl_dev_mtu(mtu)))
- return false;
+ return CANFD_MTU;
/* CAN XL -> needs to be enabled and a CAN XL device */
if (ro->xl_frames && can_is_canxl_skb(skb) &&
can_is_canxl_dev_mtu(mtu))
- return false;
+ return CANXL_MTU;
- return true;
+ return 0;
}
static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
@@ -829,6 +903,7 @@ static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
struct sockcm_cookie sockc;
struct sk_buff *skb;
struct net_device *dev;
+ unsigned int txmtu;
int ifindex;
int err = -EINVAL;
@@ -869,9 +944,16 @@ static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
goto free_skb;
err = -EINVAL;
- if (raw_bad_txframe(ro, skb, dev->mtu))
+
+ /* check for valid CAN (CC/FD/XL) frame content */
+ txmtu = raw_check_txframe(ro, skb, dev->mtu);
+ if (!txmtu)
goto free_skb;
+ /* only CANXL: clear/forward/set VCID value */
+ if (txmtu == CANXL_MTU)
+ raw_put_canxl_vcid(ro, skb);
+
sockcm_init(&sockc, sk);
if (msg->msg_controllen) {
err = sock_cmsg_send(sk, msg, &sockc);
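The VCID handling is opt-in via the new sockopt. A hedged user-space sketch that tags outgoing CAN XL frames with VCID 0x23 and only accepts incoming frames whose VCID matches 0x45 (the VCID values are arbitrary examples):

	#include <linux/can.h>
	#include <linux/can/raw.h>
	#include <sys/socket.h>

	static int raw_setup_vcid(int s)
	{
		struct can_raw_vcid_options vcid_opts = {
			.flags        = CAN_RAW_XL_VCID_TX_SET |
					CAN_RAW_XL_VCID_RX_FILTER,
			.tx_vcid      = 0x23,
			.rx_vcid      = 0x45,
			.rx_vcid_mask = 0xff,
		};

		return setsockopt(s, SOL_CAN_RAW, CAN_RAW_XL_VCID_OPTS,
				  &vcid_opts, sizeof(vcid_opts));
	}

Without the sockopt the socket keeps the previous behaviour: received VCID-tagged frames are dropped and the VCID bits of transmitted frames are cleared.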
diff --git a/net/core/Makefile b/net/core/Makefile
index 821aec06abf1..6e6548011fae 100644
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -18,6 +18,7 @@ obj-y += dev.o dev_addr_lists.o dst.o netevent.o \
obj-$(CONFIG_NETDEV_ADDR_LIST_TEST) += dev_addr_lists_test.o
obj-y += net-sysfs.o
+obj-y += hotdata.o
obj-$(CONFIG_PAGE_POOL) += page_pool.o page_pool_user.o
obj-$(CONFIG_PROC_FS) += net-procfs.o
obj-$(CONFIG_NET_PKTGEN) += pktgen.o
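The new net/core/hotdata.c gathers formerly scattered globals (ptype_all, offload_base, skbuff_cache, max_backlog, the RPS flow table, the budgets and weights touched in the following hunks) into one instance so the hottest read-mostly fields share cache lines. A hedged sketch of the shape implied by the accesses in this series; field order and completeness are assumptions:

	struct net_hotdata {
		struct list_head	offload_base;
		struct list_head	ptype_all;
	#ifdef CONFIG_RPS
		struct rps_sock_flow_table __rcu *rps_sock_flow_table;
		u32			rps_cpu_mask;
	#endif
		struct kmem_cache	*skbuff_cache;
		int			netdev_budget;
		unsigned int		netdev_budget_usecs;
		int			tstamp_prequeue;
		int			max_backlog;
		int			dev_rx_weight;
		int			dev_tx_weight;
	};
	extern struct net_hotdata net_hotdata;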
diff --git a/net/core/dev.c b/net/core/dev.c
index a892f7265189..0766a245816b 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -153,41 +153,21 @@
#include <linux/prandom.h>
#include <linux/once_lite.h>
#include <net/netdev_rx_queue.h>
+#include <net/page_pool/types.h>
+#include <net/page_pool/helpers.h>
+#include <net/rps.h>
#include "dev.h"
#include "net-sysfs.h"
static DEFINE_SPINLOCK(ptype_lock);
struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
-struct list_head ptype_all __read_mostly; /* Taps */
static int netif_rx_internal(struct sk_buff *skb);
static int call_netdevice_notifiers_extack(unsigned long val,
struct net_device *dev,
struct netlink_ext_ack *extack);
-/*
- * The @dev_base_head list is protected by @dev_base_lock and the rtnl
- * semaphore.
- *
- * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
- *
- * Writers must hold the rtnl semaphore while they loop through the
- * dev_base_head list, and hold dev_base_lock for writing when they do the
- * actual updates. This allows pure readers to access the list even
- * while a writer is preparing to update it.
- *
- * To put it another way, dev_base_lock is held for writing only to
- * protect against pure readers; the rtnl semaphore provides the
- * protection against other writers.
- *
- * See, for example usages, register_netdevice() and
- * unregister_netdevice(), which must be called with the rtnl
- * semaphore held.
- */
-DEFINE_RWLOCK(dev_base_lock);
-EXPORT_SYMBOL(dev_base_lock);
-
static DEFINE_MUTEX(ifalias_mutex);
/* protects napi_hash addition/deletion and napi_gen_id */
@@ -200,8 +180,9 @@ static DECLARE_RWSEM(devnet_rename_sem);
static inline void dev_base_seq_inc(struct net *net)
{
- while (++net->dev_base_seq == 0)
- ;
+ unsigned int val = net->dev_base_seq + 1;
+
+ WRITE_ONCE(net->dev_base_seq, val ?: 1);
}
static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
@@ -341,13 +322,22 @@ int netdev_name_node_alt_create(struct net_device *dev, const char *name)
return 0;
}
-static void __netdev_name_node_alt_destroy(struct netdev_name_node *name_node)
+static void netdev_name_node_alt_free(struct rcu_head *head)
{
- list_del(&name_node->list);
+ struct netdev_name_node *name_node =
+ container_of(head, struct netdev_name_node, rcu);
+
kfree(name_node->name);
netdev_name_node_free(name_node);
}
+static void __netdev_name_node_alt_destroy(struct netdev_name_node *name_node)
+{
+ netdev_name_node_del(name_node);
+ list_del(&name_node->list);
+ call_rcu(&name_node->rcu, netdev_name_node_alt_free);
+}
+
int netdev_name_node_alt_destroy(struct net_device *dev, const char *name)
{
struct netdev_name_node *name_node;
@@ -362,10 +352,7 @@ int netdev_name_node_alt_destroy(struct net_device *dev, const char *name)
if (name_node == dev->name_node || name_node->dev != dev)
return -EINVAL;
- netdev_name_node_del(name_node);
- synchronize_rcu();
__netdev_name_node_alt_destroy(name_node);
-
return 0;
}
@@ -373,8 +360,10 @@ static void netdev_name_node_alt_flush(struct net_device *dev)
{
struct netdev_name_node *name_node, *tmp;
- list_for_each_entry_safe(name_node, tmp, &dev->name_node->list, list)
- __netdev_name_node_alt_destroy(name_node);
+ list_for_each_entry_safe(name_node, tmp, &dev->name_node->list, list) {
+ list_del(&name_node->list);
+ netdev_name_node_alt_free(&name_node->rcu);
+ }
}
/* Device list insertion */
@@ -385,12 +374,10 @@ static void list_netdevice(struct net_device *dev)
ASSERT_RTNL();
- write_lock(&dev_base_lock);
list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
netdev_name_node_add(net, dev->name_node);
hlist_add_head_rcu(&dev->index_hlist,
dev_index_hash(net, dev->ifindex));
- write_unlock(&dev_base_lock);
netdev_for_each_altname(dev, name_node)
netdev_name_node_add(net, name_node);
@@ -404,7 +391,7 @@ static void list_netdevice(struct net_device *dev)
/* Device list removal
* caller must respect a RCU grace period before freeing/reusing dev
*/
-static void unlist_netdevice(struct net_device *dev, bool lock)
+static void unlist_netdevice(struct net_device *dev)
{
struct netdev_name_node *name_node;
struct net *net = dev_net(dev);
@@ -417,13 +404,9 @@ static void unlist_netdevice(struct net_device *dev, bool lock)
netdev_name_node_del(name_node);
/* Unlink dev from the device chain */
- if (lock)
- write_lock(&dev_base_lock);
list_del_rcu(&dev->dev_list);
netdev_name_node_del(dev->name_node);
hlist_del_rcu(&dev->index_hlist);
- if (lock)
- write_unlock(&dev_base_lock);
dev_base_seq_inc(dev_net(dev));
}
@@ -442,6 +425,12 @@ static RAW_NOTIFIER_HEAD(netdev_chain);
DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);
+/* Page_pool has a lockless array/stack to alloc/recycle pages.
+ * PP consumers must pay attention to run APIs in the appropriate context
+ * (e.g. NAPI context).
+ */
+static DEFINE_PER_CPU_ALIGNED(struct page_pool *, system_page_pool);
+
#ifdef CONFIG_LOCKDEP
/*
* register_netdevice() inits txq->_xmit_lock and sets lockdep class
@@ -551,7 +540,7 @@ static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
static inline struct list_head *ptype_head(const struct packet_type *pt)
{
if (pt->type == htons(ETH_P_ALL))
- return pt->dev ? &pt->dev->ptype_all : &ptype_all;
+ return pt->dev ? &pt->dev->ptype_all : &net_hotdata.ptype_all;
else
return pt->dev ? &pt->dev->ptype_specific :
&ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
@@ -653,7 +642,7 @@ int dev_get_iflink(const struct net_device *dev)
if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
return dev->netdev_ops->ndo_get_iflink(dev);
- return dev->ifindex;
+ return READ_ONCE(dev->ifindex);
}
EXPORT_SYMBOL(dev_get_iflink);
@@ -738,9 +727,9 @@ EXPORT_SYMBOL_GPL(dev_fill_forward_path);
* @net: the applicable net namespace
* @name: name to find
*
- * Find an interface by name. Must be called under RTNL semaphore
- * or @dev_base_lock. If the name is found a pointer to the device
- * is returned. If the name is not found then %NULL is returned. The
+ * Find an interface by name. Must be called under RTNL semaphore.
+ * If the name is found a pointer to the device is returned.
+ * If the name is not found then %NULL is returned. The
* reference counters are not incremented so the caller must be
* careful with locks.
*/
@@ -821,8 +810,7 @@ EXPORT_SYMBOL(netdev_get_by_name);
* Search for an interface by index. Returns %NULL if the device
* is not found or a pointer to the device. The device has not
* had its reference counter increased so the caller must be careful
- * about locking. The caller must hold either the RTNL semaphore
- * or @dev_base_lock.
+ * about locking. The caller must hold the RTNL semaphore.
*/
struct net_device *__dev_get_by_index(struct net *net, int ifindex)
@@ -1212,13 +1200,13 @@ int dev_change_name(struct net_device *dev, const char *newname)
dev->flags & IFF_UP ? " (while UP)" : "");
old_assign_type = dev->name_assign_type;
- dev->name_assign_type = NET_NAME_RENAMED;
+ WRITE_ONCE(dev->name_assign_type, NET_NAME_RENAMED);
rollback:
ret = device_rename(&dev->dev, dev->name);
if (ret) {
memcpy(dev->name, oldname, IFNAMSIZ);
- dev->name_assign_type = old_assign_type;
+ WRITE_ONCE(dev->name_assign_type, old_assign_type);
up_write(&devnet_rename_sem);
return ret;
}
@@ -1227,15 +1215,11 @@ rollback:
netdev_adjacent_rename_links(dev, oldname);
- write_lock(&dev_base_lock);
netdev_name_node_del(dev->name_node);
- write_unlock(&dev_base_lock);
- synchronize_rcu();
+ synchronize_net();
- write_lock(&dev_base_lock);
netdev_name_node_add(net, dev->name_node);
- write_unlock(&dev_base_lock);
ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
ret = notifier_to_errno(ret);
@@ -1247,7 +1231,7 @@ rollback:
down_write(&devnet_rename_sem);
memcpy(dev->name, oldname, IFNAMSIZ);
memcpy(oldname, newname, IFNAMSIZ);
- dev->name_assign_type = old_assign_type;
+ WRITE_ONCE(dev->name_assign_type, old_assign_type);
old_assign_type = NET_NAME_RENAMED;
goto rollback;
} else {
@@ -2242,7 +2226,8 @@ static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
*/
bool dev_nit_active(struct net_device *dev)
{
- return !list_empty(&ptype_all) || !list_empty(&dev->ptype_all);
+ return !list_empty(&net_hotdata.ptype_all) ||
+ !list_empty(&dev->ptype_all);
}
EXPORT_SYMBOL_GPL(dev_nit_active);
@@ -2253,10 +2238,9 @@ EXPORT_SYMBOL_GPL(dev_nit_active);
void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
- struct packet_type *ptype;
+ struct list_head *ptype_list = &net_hotdata.ptype_all;
+ struct packet_type *ptype, *pt_prev = NULL;
struct sk_buff *skb2 = NULL;
- struct packet_type *pt_prev = NULL;
- struct list_head *ptype_list = &ptype_all;
rcu_read_lock();
again:
@@ -2302,7 +2286,7 @@ again:
pt_prev = ptype;
}
- if (ptype_list == &ptype_all) {
+ if (ptype_list == &net_hotdata.ptype_all) {
ptype_list = &dev->ptype_all;
goto again;
}
@@ -4421,19 +4405,10 @@ EXPORT_SYMBOL(__dev_direct_xmit);
* Receiver routines
*************************************************************************/
-int netdev_max_backlog __read_mostly = 1000;
-EXPORT_SYMBOL(netdev_max_backlog);
-
-int netdev_tstamp_prequeue __read_mostly = 1;
unsigned int sysctl_skb_defer_max __read_mostly = 64;
-int netdev_budget __read_mostly = 300;
-/* Must be at least 2 jiffes to guarantee 1 jiffy timeout */
-unsigned int __read_mostly netdev_budget_usecs = 2 * USEC_PER_SEC / HZ;
int weight_p __read_mostly = 64; /* old backlog weight */
int dev_weight_rx_bias __read_mostly = 1; /* bias for backlog weight */
int dev_weight_tx_bias __read_mostly = 1; /* bias for output_queue quota */
-int dev_rx_weight __read_mostly = 64;
-int dev_tx_weight __read_mostly = 64;
/* Called with irq disabled */
static inline void ____napi_schedule(struct softnet_data *sd,
@@ -4475,12 +4450,6 @@ static inline void ____napi_schedule(struct softnet_data *sd,
#ifdef CONFIG_RPS
-/* One global table that all flow-based protocols share. */
-struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
-EXPORT_SYMBOL(rps_sock_flow_table);
-u32 rps_cpu_mask __read_mostly;
-EXPORT_SYMBOL(rps_cpu_mask);
-
struct static_key_false rps_needed __read_mostly;
EXPORT_SYMBOL(rps_needed);
struct static_key_false rfs_needed __read_mostly;
@@ -4572,7 +4541,7 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
if (!hash)
goto done;
- sock_flow_table = rcu_dereference(rps_sock_flow_table);
+ sock_flow_table = rcu_dereference(net_hotdata.rps_sock_flow_table);
if (flow_table && sock_flow_table) {
struct rps_dev_flow *rflow;
u32 next_cpu;
@@ -4582,10 +4551,10 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
* This READ_ONCE() pairs with WRITE_ONCE() from rps_record_sock_flow().
*/
ident = READ_ONCE(sock_flow_table->ents[hash & sock_flow_table->mask]);
- if ((ident ^ hash) & ~rps_cpu_mask)
+ if ((ident ^ hash) & ~net_hotdata.rps_cpu_mask)
goto try_rps;
- next_cpu = ident & rps_cpu_mask;
+ next_cpu = ident & net_hotdata.rps_cpu_mask;
/* OK, now we know there is a match,
* we can look at the local (per receive queue) flow table
@@ -4734,7 +4703,7 @@ static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
struct softnet_data *sd;
unsigned int old_flow, new_flow;
- if (qlen < (READ_ONCE(netdev_max_backlog) >> 1))
+ if (qlen < (READ_ONCE(net_hotdata.max_backlog) >> 1))
return false;
sd = this_cpu_ptr(&softnet_data);
@@ -4782,7 +4751,8 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
if (!netif_running(skb->dev))
goto drop;
qlen = skb_queue_len(&sd->input_pkt_queue);
- if (qlen <= READ_ONCE(netdev_max_backlog) && !skb_flow_limit(skb, qlen)) {
+ if (qlen <= READ_ONCE(net_hotdata.max_backlog) &&
+ !skb_flow_limit(skb, qlen)) {
if (qlen) {
enqueue:
__skb_queue_tail(&sd->input_pkt_queue, skb);
@@ -4858,6 +4828,12 @@ u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
xdp_init_buff(xdp, frame_sz, &rxqueue->xdp_rxq);
xdp_prepare_buff(xdp, hard_start, skb_headroom(skb) - mac_len,
skb_headlen(skb) + mac_len, true);
+ if (skb_is_nonlinear(skb)) {
+ skb_shinfo(skb)->xdp_frags_size = skb->data_len;
+ xdp_buff_set_frags_flag(xdp);
+ } else {
+ xdp_buff_clear_frags_flag(xdp);
+ }
orig_data_end = xdp->data_end;
orig_data = xdp->data;
@@ -4887,6 +4863,14 @@ u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
skb->len += off; /* positive on grow, negative on shrink */
}
+	/* XDP frag metadata (e.g. nr_frags) is updated in eBPF helpers
+	 * (e.g. bpf_xdp_adjust_tail), so we need to update data_len here.
+ */
+ if (xdp_buff_has_frags(xdp))
+ skb->data_len = skb_shinfo(skb)->xdp_frags_size;
+ else
+ skb->data_len = 0;
+
/* check if XDP changed eth hdr such SKB needs update */
eth = (struct ethhdr *)xdp->data;
if ((orig_eth_type != eth->h_proto) ||
@@ -4920,11 +4904,35 @@ u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
return act;
}
-static u32 netif_receive_generic_xdp(struct sk_buff *skb,
+static int
+netif_skb_check_for_xdp(struct sk_buff **pskb, struct bpf_prog *prog)
+{
+ struct sk_buff *skb = *pskb;
+ int err, hroom, troom;
+
+ if (!skb_cow_data_for_xdp(this_cpu_read(system_page_pool), pskb, prog))
+ return 0;
+
+ /* In case we have to go down the path and also linearize,
+	 * then let's do the pskb_expand_head() work just once here.
+ */
+ hroom = XDP_PACKET_HEADROOM - skb_headroom(skb);
+ troom = skb->tail + skb->data_len - skb->end;
+ err = pskb_expand_head(skb,
+ hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0,
+ troom > 0 ? troom + 128 : 0, GFP_ATOMIC);
+ if (err)
+ return err;
+
+ return skb_linearize(skb);
+}
+
+static u32 netif_receive_generic_xdp(struct sk_buff **pskb,
struct xdp_buff *xdp,
struct bpf_prog *xdp_prog)
{
- u32 act = XDP_DROP;
+ struct sk_buff *skb = *pskb;
+ u32 mac_len, act = XDP_DROP;
/* Reinjected packets coming from act_mirred or similar should
* not get XDP generic processing.
@@ -4932,41 +4940,36 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
if (skb_is_redirected(skb))
return XDP_PASS;
- /* XDP packets must be linear and must have sufficient headroom
- * of XDP_PACKET_HEADROOM bytes. This is the guarantee that also
- * native XDP provides, thus we need to do it here as well.
+ /* XDP packets must have sufficient headroom of XDP_PACKET_HEADROOM
+ * bytes. This is the guarantee that also native XDP provides,
+ * thus we need to do it here as well.
*/
+ mac_len = skb->data - skb_mac_header(skb);
+ __skb_push(skb, mac_len);
+
if (skb_cloned(skb) || skb_is_nonlinear(skb) ||
skb_headroom(skb) < XDP_PACKET_HEADROOM) {
- int hroom = XDP_PACKET_HEADROOM - skb_headroom(skb);
- int troom = skb->tail + skb->data_len - skb->end;
-
- /* In case we have to go down the path and also linearize,
- * then lets do the pskb_expand_head() work just once here.
- */
- if (pskb_expand_head(skb,
- hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0,
- troom > 0 ? troom + 128 : 0, GFP_ATOMIC))
- goto do_drop;
- if (skb_linearize(skb))
+ if (netif_skb_check_for_xdp(pskb, xdp_prog))
goto do_drop;
}
- act = bpf_prog_run_generic_xdp(skb, xdp, xdp_prog);
+ __skb_pull(*pskb, mac_len);
+
+ act = bpf_prog_run_generic_xdp(*pskb, xdp, xdp_prog);
switch (act) {
case XDP_REDIRECT:
case XDP_TX:
case XDP_PASS:
break;
default:
- bpf_warn_invalid_xdp_action(skb->dev, xdp_prog, act);
+ bpf_warn_invalid_xdp_action((*pskb)->dev, xdp_prog, act);
fallthrough;
case XDP_ABORTED:
- trace_xdp_exception(skb->dev, xdp_prog, act);
+ trace_xdp_exception((*pskb)->dev, xdp_prog, act);
fallthrough;
case XDP_DROP:
do_drop:
- kfree_skb(skb);
+ kfree_skb(*pskb);
break;
}
@@ -5004,24 +5007,24 @@ void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog)
static DEFINE_STATIC_KEY_FALSE(generic_xdp_needed_key);
-int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb)
+int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff **pskb)
{
if (xdp_prog) {
struct xdp_buff xdp;
u32 act;
int err;
- act = netif_receive_generic_xdp(skb, &xdp, xdp_prog);
+ act = netif_receive_generic_xdp(pskb, &xdp, xdp_prog);
if (act != XDP_PASS) {
switch (act) {
case XDP_REDIRECT:
- err = xdp_do_generic_redirect(skb->dev, skb,
+ err = xdp_do_generic_redirect((*pskb)->dev, *pskb,
&xdp, xdp_prog);
if (err)
goto out_redir;
break;
case XDP_TX:
- generic_xdp_tx(skb, xdp_prog);
+ generic_xdp_tx(*pskb, xdp_prog);
break;
}
return XDP_DROP;
@@ -5029,7 +5032,7 @@ int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb)
}
return XDP_PASS;
out_redir:
- kfree_skb_reason(skb, SKB_DROP_REASON_XDP);
+ kfree_skb_reason(*pskb, SKB_DROP_REASON_XDP);
return XDP_DROP;
}
EXPORT_SYMBOL_GPL(do_xdp_generic);
@@ -5038,7 +5041,7 @@ static int netif_rx_internal(struct sk_buff *skb)
{
int ret;
- net_timestamp_check(READ_ONCE(netdev_tstamp_prequeue), skb);
+ net_timestamp_check(READ_ONCE(net_hotdata.tstamp_prequeue), skb);
trace_netif_rx(skb);
@@ -5330,7 +5333,7 @@ static int __netif_receive_skb_core(struct sk_buff **pskb, bool pfmemalloc,
int ret = NET_RX_DROP;
__be16 type;
- net_timestamp_check(!READ_ONCE(netdev_tstamp_prequeue), skb);
+ net_timestamp_check(!READ_ONCE(net_hotdata.tstamp_prequeue), skb);
trace_netif_receive_skb(skb);
@@ -5352,7 +5355,8 @@ another_round:
int ret2;
migrate_disable();
- ret2 = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
+ ret2 = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog),
+ &skb);
migrate_enable();
if (ret2 != XDP_PASS) {
@@ -5373,7 +5377,7 @@ another_round:
if (pfmemalloc)
goto skip_taps;
- list_for_each_entry_rcu(ptype, &ptype_all, list) {
+ list_for_each_entry_rcu(ptype, &net_hotdata.ptype_all, list) {
if (pt_prev)
ret = deliver_skb(skb, pt_prev, orig_dev);
pt_prev = ptype;
@@ -5713,7 +5717,7 @@ static int netif_receive_skb_internal(struct sk_buff *skb)
{
int ret;
- net_timestamp_check(READ_ONCE(netdev_tstamp_prequeue), skb);
+ net_timestamp_check(READ_ONCE(net_hotdata.tstamp_prequeue), skb);
if (skb_defer_rx_timestamp(skb))
return NET_RX_SUCCESS;
@@ -5743,7 +5747,8 @@ void netif_receive_skb_list_internal(struct list_head *head)
INIT_LIST_HEAD(&sublist);
list_for_each_entry_safe(skb, next, head, list) {
- net_timestamp_check(READ_ONCE(netdev_tstamp_prequeue), skb);
+ net_timestamp_check(READ_ONCE(net_hotdata.tstamp_prequeue),
+ skb);
skb_list_del_init(skb);
if (!skb_defer_rx_timestamp(skb))
list_add_tail(&skb->list, &sublist);
@@ -5967,7 +5972,7 @@ static int process_backlog(struct napi_struct *napi, int quota)
net_rps_action_and_irq_enable(sd);
}
- napi->weight = READ_ONCE(dev_rx_weight);
+ napi->weight = READ_ONCE(net_hotdata.dev_rx_weight);
while (again) {
struct sk_buff *skb;
@@ -6156,6 +6161,27 @@ struct napi_struct *napi_by_id(unsigned int napi_id)
return NULL;
}
+static void skb_defer_free_flush(struct softnet_data *sd)
+{
+ struct sk_buff *skb, *next;
+
+ /* Paired with WRITE_ONCE() in skb_attempt_defer_free() */
+ if (!READ_ONCE(sd->defer_list))
+ return;
+
+ spin_lock(&sd->defer_lock);
+ skb = sd->defer_list;
+ sd->defer_list = NULL;
+ sd->defer_count = 0;
+ spin_unlock(&sd->defer_lock);
+
+ while (skb != NULL) {
+ next = skb->next;
+ napi_consume_skb(skb, 1);
+ skb = next;
+ }
+}
+
#if defined(CONFIG_NET_RX_BUSY_POLL)
static void __busy_poll_stop(struct napi_struct *napi, bool skip_schedule)
@@ -6280,6 +6306,7 @@ count:
if (work > 0)
__NET_ADD_STATS(dev_net(napi->dev),
LINUX_MIB_BUSYPOLLRXPACKETS, work);
+ skb_defer_free_flush(this_cpu_ptr(&softnet_data));
local_bh_enable();
if (!loop_end || loop_end(loop_end_arg, start_time))
@@ -6709,27 +6736,6 @@ static int napi_thread_wait(struct napi_struct *napi)
return -1;
}
-static void skb_defer_free_flush(struct softnet_data *sd)
-{
- struct sk_buff *skb, *next;
-
- /* Paired with WRITE_ONCE() in skb_attempt_defer_free() */
- if (!READ_ONCE(sd->defer_list))
- return;
-
- spin_lock(&sd->defer_lock);
- skb = sd->defer_list;
- sd->defer_list = NULL;
- sd->defer_count = 0;
- spin_unlock(&sd->defer_lock);
-
- while (skb != NULL) {
- next = skb->next;
- napi_consume_skb(skb, 1);
- skb = next;
- }
-}
-
static int napi_threaded_poll(void *data)
{
struct napi_struct *napi = data;
@@ -6771,8 +6777,8 @@ static __latent_entropy void net_rx_action(struct softirq_action *h)
{
struct softnet_data *sd = this_cpu_ptr(&softnet_data);
unsigned long time_limit = jiffies +
- usecs_to_jiffies(READ_ONCE(netdev_budget_usecs));
- int budget = READ_ONCE(netdev_budget);
+ usecs_to_jiffies(READ_ONCE(net_hotdata.netdev_budget_usecs));
+ int budget = READ_ONCE(net_hotdata.netdev_budget);
LIST_HEAD(list);
LIST_HEAD(repoll);
@@ -8615,12 +8621,12 @@ unsigned int dev_get_flags(const struct net_device *dev)
{
unsigned int flags;
- flags = (dev->flags & ~(IFF_PROMISC |
+ flags = (READ_ONCE(dev->flags) & ~(IFF_PROMISC |
IFF_ALLMULTI |
IFF_RUNNING |
IFF_LOWER_UP |
IFF_DORMANT)) |
- (dev->gflags & (IFF_PROMISC |
+ (READ_ONCE(dev->gflags) & (IFF_PROMISC |
IFF_ALLMULTI));
if (netif_running(dev)) {
@@ -8943,7 +8949,7 @@ int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa,
}
EXPORT_SYMBOL(dev_set_mac_address);
-static DECLARE_RWSEM(dev_addr_sem);
+DECLARE_RWSEM(dev_addr_sem);
int dev_set_mac_address_user(struct net_device *dev, struct sockaddr *sa,
struct netlink_ext_ack *extack)
@@ -9697,11 +9703,11 @@ static void dev_index_release(struct net *net, int ifindex)
/* Delayed registration/unregisteration */
LIST_HEAD(net_todo_list);
DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
+atomic_t dev_unreg_count = ATOMIC_INIT(0);
static void net_set_todo(struct net_device *dev)
{
list_add_tail(&dev->todo_list, &net_todo_list);
- atomic_inc(&dev_net(dev)->dev_unreg_count);
}
static netdev_features_t netdev_sync_upper_features(struct net_device *lower,
@@ -10266,9 +10272,9 @@ int register_netdevice(struct net_device *dev)
goto err_ifindex_release;
ret = netdev_register_kobject(dev);
- write_lock(&dev_base_lock);
- dev->reg_state = ret ? NETREG_UNREGISTERED : NETREG_REGISTERED;
- write_unlock(&dev_base_lock);
+
+ WRITE_ONCE(dev->reg_state, ret ? NETREG_UNREGISTERED : NETREG_REGISTERED);
+
if (ret)
goto err_uninit_notify;
@@ -10344,7 +10350,7 @@ EXPORT_SYMBOL(register_netdevice);
* that need to tie several hardware interfaces to a single NAPI
* poll scheduler due to HW limitations.
*/
-int init_dummy_netdev(struct net_device *dev)
+void init_dummy_netdev(struct net_device *dev)
{
/* Clear everything. Note we don't initialize spinlocks
* as they aren't supposed to be taken by any of the
@@ -10372,8 +10378,6 @@ int init_dummy_netdev(struct net_device *dev)
* because users of this 'device' dont need to change
* its refcount.
*/
-
- return 0;
}
EXPORT_SYMBOL_GPL(init_dummy_netdev);
@@ -10528,6 +10532,7 @@ void netdev_run_todo(void)
{
struct net_device *dev, *tmp;
struct list_head list;
+ int cnt;
#ifdef CONFIG_LOCKDEP
struct list_head unlink_list;
@@ -10558,12 +10563,11 @@ void netdev_run_todo(void)
continue;
}
- write_lock(&dev_base_lock);
- dev->reg_state = NETREG_UNREGISTERED;
- write_unlock(&dev_base_lock);
+ WRITE_ONCE(dev->reg_state, NETREG_UNREGISTERED);
linkwatch_sync_dev(dev);
}
+ cnt = 0;
while (!list_empty(&list)) {
dev = netdev_wait_allrefs_any(&list);
list_del(&dev->todo_list);
@@ -10581,12 +10585,13 @@ void netdev_run_todo(void)
if (dev->needs_free_netdev)
free_netdev(dev);
- if (atomic_dec_and_test(&dev_net(dev)->dev_unreg_count))
- wake_up(&netdev_unregistering_wq);
+ cnt++;
/* Free network device */
kobject_put(&dev->dev.kobj);
}
+ if (cnt && atomic_sub_and_test(cnt, &dev_unreg_count))
+ wake_up(&netdev_unregistering_wq);
}
/* Convert net_device_stats to rtnl_link_stats64. rtnl_link_stats64 has
@@ -10663,6 +10668,8 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
ops->ndo_get_stats64(dev, storage);
} else if (ops->ndo_get_stats) {
netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
+ } else if (dev->pcpu_stat_type == NETDEV_PCPU_STAT_TSTATS) {
+ dev_get_tstats64(dev, storage);
} else {
netdev_stats_to_stats64(storage, &dev->stats);
}
@@ -10977,7 +10984,7 @@ void free_netdev(struct net_device *dev)
}
BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
- dev->reg_state = NETREG_RELEASED;
+ WRITE_ONCE(dev->reg_state, NETREG_RELEASED);
/* will free via device release */
put_device(&dev->dev);
@@ -11033,6 +11040,7 @@ void unregister_netdevice_many_notify(struct list_head *head,
{
struct net_device *dev, *tmp;
LIST_HEAD(close_head);
+ int cnt = 0;
BUG_ON(dev_boot_phase);
ASSERT_RTNL();
@@ -11064,10 +11072,8 @@ void unregister_netdevice_many_notify(struct list_head *head,
list_for_each_entry(dev, head, unreg_list) {
/* And unlink it from device chain. */
- write_lock(&dev_base_lock);
- unlist_netdevice(dev, false);
- dev->reg_state = NETREG_UNREGISTERING;
- write_unlock(&dev_base_lock);
+ unlist_netdevice(dev);
+ WRITE_ONCE(dev->reg_state, NETREG_UNREGISTERING);
}
flush_all_backlogs();
@@ -11129,7 +11135,9 @@ void unregister_netdevice_many_notify(struct list_head *head,
list_for_each_entry(dev, head, unreg_list) {
netdev_put(dev, &dev->dev_registered_tracker);
net_set_todo(dev);
+ cnt++;
}
+ atomic_add(cnt, &dev_unreg_count);
list_del(head);
}
@@ -11247,7 +11255,7 @@ int __dev_change_net_namespace(struct net_device *dev, struct net *net,
dev_close(dev);
/* And unlink it from device chain */
- unlist_netdevice(dev, true);
+ unlist_netdevice(dev);
synchronize_net();
@@ -11583,11 +11591,8 @@ static void __net_exit default_device_exit_net(struct net *net)
snprintf(fb_name, IFNAMSIZ, "dev%%d");
netdev_for_each_altname_safe(dev, name_node, tmp)
- if (netdev_name_in_use(&init_net, name_node->name)) {
- netdev_name_node_del(name_node);
- synchronize_rcu();
+ if (netdev_name_in_use(&init_net, name_node->name))
__netdev_name_node_alt_destroy(name_node);
- }
err = dev_change_net_namespace(dev, &init_net, fb_name);
if (err) {
@@ -11694,6 +11699,28 @@ static void __init net_dev_struct_check(void)
*
*/
+/* We allocate 256 pages for each CPU if PAGE_SHIFT is 12 */
+#define SYSTEM_PERCPU_PAGE_POOL_SIZE ((1 << 20) / PAGE_SIZE)
+
+static int net_page_pool_create(int cpuid)
+{
+#if IS_ENABLED(CONFIG_PAGE_POOL)
+ struct page_pool_params page_pool_params = {
+ .pool_size = SYSTEM_PERCPU_PAGE_POOL_SIZE,
+ .flags = PP_FLAG_SYSTEM_POOL,
+ .nid = NUMA_NO_NODE,
+ };
+ struct page_pool *pp_ptr;
+
+ pp_ptr = page_pool_create_percpu(&page_pool_params, cpuid);
+ if (IS_ERR(pp_ptr))
+ return -ENOMEM;
+
+ per_cpu(system_page_pool, cpuid) = pp_ptr;
+#endif
+ return 0;
+}
+
/*
* This is called single threaded during boot, so no need
* to take the rtnl semaphore.
@@ -11712,7 +11739,6 @@ static int __init net_dev_init(void)
if (netdev_kobject_init())
goto out;
- INIT_LIST_HEAD(&ptype_all);
for (i = 0; i < PTYPE_HASH_SIZE; i++)
INIT_LIST_HEAD(&ptype_base[i]);
@@ -11746,6 +11772,9 @@ static int __init net_dev_init(void)
init_gro_hash(&sd->backlog);
sd->backlog.poll = process_backlog;
sd->backlog.weight = weight_p;
+
+ if (net_page_pool_create(i))
+ goto out;
}
dev_boot_phase = 0;
@@ -11773,6 +11802,19 @@ static int __init net_dev_init(void)
WARN_ON(rc < 0);
rc = 0;
out:
+ if (rc < 0) {
+ for_each_possible_cpu(i) {
+ struct page_pool *pp_ptr;
+
+ pp_ptr = per_cpu(system_page_pool, i);
+ if (!pp_ptr)
+ continue;
+
+ page_pool_destroy(pp_ptr);
+ per_cpu(system_page_pool, i) = NULL;
+ }
+ }
+
return rc;
}
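With dev_base_lock gone, the writer side relies on RTNL plus the RCU-aware list primitives already in use (list_add_tail_rcu()/list_del_rcu() above), and pure readers switch to plain RCU. A minimal reader sketch under the new rules (my_dump is hypothetical):

	static void my_dump(struct net *net)
	{
		struct net_device *dev;

		rcu_read_lock();
		for_each_netdev_rcu(net, dev)
			pr_info("%s\n", dev->name);
		rcu_read_unlock();
	}

Fields that readers may now sample locklessly (reg_state, name_assign_type, flags, gflags, dev_base_seq) are correspondingly converted to WRITE_ONCE()/READ_ONCE() pairs in the hunks above.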
diff --git a/net/core/dev.h b/net/core/dev.h
index 7480b4c84298..2bcaf8eee50c 100644
--- a/net/core/dev.h
+++ b/net/core/dev.h
@@ -3,6 +3,7 @@
#define _NET_CORE_DEV_H
#include <linux/types.h>
+#include <linux/rwsem.h>
struct net;
struct net_device;
@@ -37,15 +38,14 @@ int dev_addr_init(struct net_device *dev);
void dev_addr_check(struct net_device *dev);
/* sysctls not referred to from outside net/core/ */
-extern int netdev_budget;
-extern unsigned int netdev_budget_usecs;
extern unsigned int sysctl_skb_defer_max;
-extern int netdev_tstamp_prequeue;
extern int netdev_unregister_timeout_secs;
extern int weight_p;
extern int dev_weight_rx_bias;
extern int dev_weight_tx_bias;
+extern struct rw_semaphore dev_addr_sem;
+
/* rtnl helpers */
extern struct list_head net_todo_list;
void netdev_run_todo(void);
@@ -56,6 +56,7 @@ struct netdev_name_node {
struct list_head list;
struct net_device *dev;
const char *name;
+ struct rcu_head rcu;
};
int netdev_get_name(struct net *net, char *name, int ifindex);
diff --git a/net/core/dst.c b/net/core/dst.c
index 6838d3212c37..95f533844f17 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -96,7 +96,7 @@ void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
}
EXPORT_SYMBOL(dst_alloc);
-struct dst_entry *dst_destroy(struct dst_entry * dst)
+static void dst_destroy(struct dst_entry *dst)
{
struct dst_entry *child = NULL;
@@ -126,15 +126,13 @@ struct dst_entry *dst_destroy(struct dst_entry * dst)
dst = child;
if (dst)
dst_release_immediate(dst);
- return NULL;
}
-EXPORT_SYMBOL(dst_destroy);
static void dst_destroy_rcu(struct rcu_head *head)
{
struct dst_entry *dst = container_of(head, struct dst_entry, rcu_head);
- dst = dst_destroy(dst);
+ dst_destroy(dst);
}
/* Operations to mark dst as DEAD and clean up the net device referenced
diff --git a/net/core/filter.c b/net/core/filter.c
index ef3e78b6a39c..8adf95765cdd 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -88,7 +88,7 @@
#include "dev.h"
static const struct bpf_func_proto *
-bpf_sk_base_func_proto(enum bpf_func_id func_id);
+bpf_sk_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog);
int copy_bpf_fprog_from_user(struct sock_fprog *dst, sockptr_t src, int len)
{
@@ -778,7 +778,7 @@ jmp_rest:
BPF_EMIT_JMP;
break;
- /* ldxb 4 * ([14] & 0xf) is remaped into 6 insns. */
+ /* ldxb 4 * ([14] & 0xf) is remapped into 6 insns. */
case BPF_LDX | BPF_MSH | BPF_B: {
struct sock_filter tmp = {
.code = BPF_LD | BPF_ABS | BPF_B,
@@ -804,7 +804,7 @@ jmp_rest:
*insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_TMP);
break;
}
- /* RET_K is remaped into 2 insns. RET_A case doesn't need an
+ /* RET_K is remapped into 2 insns. RET_A case doesn't need an
* extra mov as BPF_REG_0 is already mapped into BPF_REG_A.
*/
case BPF_RET | BPF_A:
@@ -2968,7 +2968,7 @@ BPF_CALL_4(bpf_msg_pop_data, struct sk_msg *, msg, u32, start,
*
* Then if B is non-zero AND there is no space allocate space and
* compact A, B regions into page. If there is space shift ring to
- * the rigth free'ing the next element in ring to place B, leaving
+ * the right free'ing the next element in ring to place B, leaving
* A untouched except to reduce length.
*/
if (start != offset) {
@@ -5988,7 +5988,7 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
return -ENODEV;
idev = __in6_dev_get_safely(dev);
- if (unlikely(!idev || !idev->cnf.forwarding))
+ if (unlikely(!idev || !READ_ONCE(idev->cnf.forwarding)))
return BPF_FIB_LKUP_RET_FWD_DISABLED;
if (flags & BPF_FIB_LOOKUP_OUTPUT) {
@@ -7894,7 +7894,7 @@ sock_filter_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
case BPF_FUNC_ktime_get_coarse_ns:
return &bpf_ktime_get_coarse_ns_proto;
default:
- return bpf_base_func_proto(func_id);
+ return bpf_base_func_proto(func_id, prog);
}
}
@@ -7987,7 +7987,7 @@ sock_addr_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return NULL;
}
default:
- return bpf_sk_base_func_proto(func_id);
+ return bpf_sk_base_func_proto(func_id, prog);
}
}
@@ -8006,7 +8006,7 @@ sk_filter_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
case BPF_FUNC_perf_event_output:
return &bpf_skb_event_output_proto;
default:
- return bpf_sk_base_func_proto(func_id);
+ return bpf_sk_base_func_proto(func_id, prog);
}
}
@@ -8193,7 +8193,7 @@ tc_cls_act_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
#endif
#endif
default:
- return bpf_sk_base_func_proto(func_id);
+ return bpf_sk_base_func_proto(func_id, prog);
}
}
@@ -8252,13 +8252,13 @@ xdp_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
#endif
#endif
default:
- return bpf_sk_base_func_proto(func_id);
+ return bpf_sk_base_func_proto(func_id, prog);
}
#if IS_MODULE(CONFIG_NF_CONNTRACK) && IS_ENABLED(CONFIG_DEBUG_INFO_BTF_MODULES)
/* The nf_conn___init type is used in the NF_CONNTRACK kfuncs. The
* kfuncs are defined in two different modules, and we want to be able
- * to use them interchangably with the same BTF type ID. Because modules
+ * to use them interchangeably with the same BTF type ID. Because modules
* can't de-duplicate BTF IDs between each other, we need the type to be
* referenced in the vmlinux BTF or the verifier will get confused about
* the different types. So we add this dummy type reference which will
@@ -8313,7 +8313,7 @@ sock_ops_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_tcp_sock_proto;
#endif /* CONFIG_INET */
default:
- return bpf_sk_base_func_proto(func_id);
+ return bpf_sk_base_func_proto(func_id, prog);
}
}
@@ -8355,7 +8355,7 @@ sk_msg_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_get_cgroup_classid_curr_proto;
#endif
default:
- return bpf_sk_base_func_proto(func_id);
+ return bpf_sk_base_func_proto(func_id, prog);
}
}
@@ -8399,7 +8399,7 @@ sk_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_skc_lookup_tcp_proto;
#endif
default:
- return bpf_sk_base_func_proto(func_id);
+ return bpf_sk_base_func_proto(func_id, prog);
}
}
@@ -8410,7 +8410,7 @@ flow_dissector_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
case BPF_FUNC_skb_load_bytes:
return &bpf_flow_dissector_load_bytes_proto;
default:
- return bpf_sk_base_func_proto(func_id);
+ return bpf_sk_base_func_proto(func_id, prog);
}
}
@@ -8437,7 +8437,7 @@ lwt_out_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
case BPF_FUNC_skb_under_cgroup:
return &bpf_skb_under_cgroup_proto;
default:
- return bpf_sk_base_func_proto(func_id);
+ return bpf_sk_base_func_proto(func_id, prog);
}
}
@@ -8612,7 +8612,7 @@ static bool cg_skb_is_valid_access(int off, int size,
return false;
case bpf_ctx_range(struct __sk_buff, data):
case bpf_ctx_range(struct __sk_buff, data_end):
- if (!bpf_capable())
+ if (!bpf_token_capable(prog->aux->token, CAP_BPF))
return false;
break;
}
@@ -8624,7 +8624,7 @@ static bool cg_skb_is_valid_access(int off, int size,
case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
break;
case bpf_ctx_range(struct __sk_buff, tstamp):
- if (!bpf_capable())
+ if (!bpf_token_capable(prog->aux->token, CAP_BPF))
return false;
break;
default:
@@ -11268,7 +11268,7 @@ sk_reuseport_func_proto(enum bpf_func_id func_id,
case BPF_FUNC_ktime_get_coarse_ns:
return &bpf_ktime_get_coarse_ns_proto;
default:
- return bpf_base_func_proto(func_id);
+ return bpf_base_func_proto(func_id, prog);
}
}
@@ -11450,7 +11450,7 @@ sk_lookup_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
case BPF_FUNC_sk_release:
return &bpf_sk_release_proto;
default:
- return bpf_sk_base_func_proto(func_id);
+ return bpf_sk_base_func_proto(func_id, prog);
}
}
@@ -11784,7 +11784,7 @@ const struct bpf_func_proto bpf_sock_from_file_proto = {
};
static const struct bpf_func_proto *
-bpf_sk_base_func_proto(enum bpf_func_id func_id)
+bpf_sk_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
const struct bpf_func_proto *func;
@@ -11813,10 +11813,10 @@ bpf_sk_base_func_proto(enum bpf_func_id func_id)
case BPF_FUNC_ktime_get_coarse_ns:
return &bpf_ktime_get_coarse_ns_proto;
default:
- return bpf_base_func_proto(func_id);
+ return bpf_base_func_proto(func_id, prog);
}
- if (!perfmon_capable())
+ if (!bpf_token_capable(prog->aux->token, CAP_PERFMON))
return NULL;
return func;
@@ -11869,6 +11869,103 @@ __bpf_kfunc int bpf_sock_addr_set_sun_path(struct bpf_sock_addr_kern *sa_kern,
return 0;
}
+
+__bpf_kfunc int bpf_sk_assign_tcp_reqsk(struct sk_buff *skb, struct sock *sk,
+ struct bpf_tcp_req_attrs *attrs, int attrs__sz)
+{
+#if IS_ENABLED(CONFIG_SYN_COOKIES)
+ const struct request_sock_ops *ops;
+ struct inet_request_sock *ireq;
+ struct tcp_request_sock *treq;
+ struct request_sock *req;
+ struct net *net;
+ __u16 min_mss;
+ u32 tsoff = 0;
+
+ if (attrs__sz != sizeof(*attrs) ||
+ attrs->reserved[0] || attrs->reserved[1] || attrs->reserved[2])
+ return -EINVAL;
+
+ if (!skb_at_tc_ingress(skb))
+ return -EINVAL;
+
+ net = dev_net(skb->dev);
+ if (net != sock_net(sk))
+ return -ENETUNREACH;
+
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+ ops = &tcp_request_sock_ops;
+ min_mss = 536;
+ break;
+#if IS_BUILTIN(CONFIG_IPV6)
+ case htons(ETH_P_IPV6):
+ ops = &tcp6_request_sock_ops;
+ min_mss = IPV6_MIN_MTU - 60;
+ break;
+#endif
+ default:
+ return -EINVAL;
+ }
+
+ if (sk->sk_type != SOCK_STREAM || sk->sk_state != TCP_LISTEN ||
+ sk_is_mptcp(sk))
+ return -EINVAL;
+
+ if (attrs->mss < min_mss)
+ return -EINVAL;
+
+ if (attrs->wscale_ok) {
+ if (!READ_ONCE(net->ipv4.sysctl_tcp_window_scaling))
+ return -EINVAL;
+
+ if (attrs->snd_wscale > TCP_MAX_WSCALE ||
+ attrs->rcv_wscale > TCP_MAX_WSCALE)
+ return -EINVAL;
+ }
+
+ if (attrs->sack_ok && !READ_ONCE(net->ipv4.sysctl_tcp_sack))
+ return -EINVAL;
+
+ if (attrs->tstamp_ok) {
+ if (!READ_ONCE(net->ipv4.sysctl_tcp_timestamps))
+ return -EINVAL;
+
+ tsoff = attrs->rcv_tsecr - tcp_ns_to_ts(attrs->usec_ts_ok, tcp_clock_ns());
+ }
+
+ req = inet_reqsk_alloc(ops, sk, false);
+ if (!req)
+ return -ENOMEM;
+
+ ireq = inet_rsk(req);
+ treq = tcp_rsk(req);
+
+ req->rsk_listener = sk;
+ req->syncookie = 1;
+ req->mss = attrs->mss;
+ req->ts_recent = attrs->rcv_tsval;
+
+ ireq->snd_wscale = attrs->snd_wscale;
+ ireq->rcv_wscale = attrs->rcv_wscale;
+ ireq->tstamp_ok = !!attrs->tstamp_ok;
+ ireq->sack_ok = !!attrs->sack_ok;
+ ireq->wscale_ok = !!attrs->wscale_ok;
+ ireq->ecn_ok = !!attrs->ecn_ok;
+
+ treq->req_usec_ts = !!attrs->usec_ts_ok;
+ treq->ts_off = tsoff;
+
+ skb_orphan(skb);
+ skb->sk = req_to_sk(req);
+ skb->destructor = sock_pfree;
+
+ return 0;
+#else
+ return -EOPNOTSUPP;
+#endif
+}
+
__bpf_kfunc_end_defs();
int bpf_dynptr_from_skb_rdonly(struct sk_buff *skb, u64 flags,
@@ -11885,17 +11982,21 @@ int bpf_dynptr_from_skb_rdonly(struct sk_buff *skb, u64 flags,
return 0;
}
-BTF_SET8_START(bpf_kfunc_check_set_skb)
+BTF_KFUNCS_START(bpf_kfunc_check_set_skb)
BTF_ID_FLAGS(func, bpf_dynptr_from_skb)
-BTF_SET8_END(bpf_kfunc_check_set_skb)
+BTF_KFUNCS_END(bpf_kfunc_check_set_skb)
-BTF_SET8_START(bpf_kfunc_check_set_xdp)
+BTF_KFUNCS_START(bpf_kfunc_check_set_xdp)
BTF_ID_FLAGS(func, bpf_dynptr_from_xdp)
-BTF_SET8_END(bpf_kfunc_check_set_xdp)
+BTF_KFUNCS_END(bpf_kfunc_check_set_xdp)
-BTF_SET8_START(bpf_kfunc_check_set_sock_addr)
+BTF_KFUNCS_START(bpf_kfunc_check_set_sock_addr)
BTF_ID_FLAGS(func, bpf_sock_addr_set_sun_path)
-BTF_SET8_END(bpf_kfunc_check_set_sock_addr)
+BTF_KFUNCS_END(bpf_kfunc_check_set_sock_addr)
+
+BTF_KFUNCS_START(bpf_kfunc_check_set_tcp_reqsk)
+BTF_ID_FLAGS(func, bpf_sk_assign_tcp_reqsk, KF_TRUSTED_ARGS)
+BTF_KFUNCS_END(bpf_kfunc_check_set_tcp_reqsk)
static const struct btf_kfunc_id_set bpf_kfunc_set_skb = {
.owner = THIS_MODULE,
@@ -11912,6 +12013,11 @@ static const struct btf_kfunc_id_set bpf_kfunc_set_sock_addr = {
.set = &bpf_kfunc_check_set_sock_addr,
};
+static const struct btf_kfunc_id_set bpf_kfunc_set_tcp_reqsk = {
+ .owner = THIS_MODULE,
+ .set = &bpf_kfunc_check_set_tcp_reqsk,
+};
+
static int __init bpf_kfunc_init(void)
{
int ret;
@@ -11927,8 +12033,9 @@ static int __init bpf_kfunc_init(void)
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_LWT_SEG6LOCAL, &bpf_kfunc_set_skb);
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_NETFILTER, &bpf_kfunc_set_skb);
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_XDP, &bpf_kfunc_set_xdp);
- return ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
- &bpf_kfunc_set_sock_addr);
+ ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
+ &bpf_kfunc_set_sock_addr);
+ return ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_kfunc_set_tcp_reqsk);
}
late_initcall(bpf_kfunc_init);
@@ -11968,9 +12075,9 @@ __bpf_kfunc int bpf_sock_destroy(struct sock_common *sock)
__bpf_kfunc_end_defs();
-BTF_SET8_START(bpf_sk_iter_kfunc_ids)
+BTF_KFUNCS_START(bpf_sk_iter_kfunc_ids)
BTF_ID_FLAGS(func, bpf_sock_destroy, KF_TRUSTED_ARGS)
-BTF_SET8_END(bpf_sk_iter_kfunc_ids)
+BTF_KFUNCS_END(bpf_sk_iter_kfunc_ids)
static int tracing_iter_filter(const struct bpf_prog *prog, u32 kfunc_id)
{
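The kfunc registered above for SCHED_CLS lets a tc program validate its own SYN cookie and hand the SYN to the listener with the negotiated options restored. A rough caller sketch in BPF C, assuming a listener lookup succeeds; tuple parsing and cookie validation are elided, and the demo_* naming plus the attribute values are illustrative rather than taken from the patch or its selftests:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

#define TC_ACT_OK	0
#define TC_ACT_SHOT	2

extern int bpf_sk_assign_tcp_reqsk(struct __sk_buff *skb, struct sock *sk,
				   struct bpf_tcp_req_attrs *attrs,
				   int attrs__sz) __ksym;

SEC("tc")
int demo_assign_reqsk(struct __sk_buff *skb)
{
	struct bpf_tcp_req_attrs attrs = {
		.mss		= 1460,	/* as recovered from the cookie */
		.wscale_ok	= 1,
		.snd_wscale	= 7,
		.rcv_wscale	= 7,
	};
	struct bpf_sock_tuple tuple = {};	/* parsed from the SYN; elided */
	struct bpf_sock *sk;

	sk = bpf_skc_lookup_tcp(skb, &tuple, sizeof(tuple.ipv4),
				BPF_F_CURRENT_NETNS, 0);
	if (!sk)
		return TC_ACT_SHOT;

	if (sk->state == BPF_TCP_LISTEN)
		bpf_sk_assign_tcp_reqsk(skb, (struct sock *)sk,
					&attrs, sizeof(attrs));

	bpf_sk_release(sk);
	return TC_ACT_OK;
}

char _license[] SEC("license") = "GPL";

On success the skb carries the request socket via skb->sk with sock_pfree() as destructor, matching the sock.c change later in this series.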
diff --git a/net/core/gro.c b/net/core/gro.c
index 0759277dc14e..ee30d4f0c038 100644
--- a/net/core/gro.c
+++ b/net/core/gro.c
@@ -10,9 +10,6 @@
#define GRO_MAX_HEAD (MAX_HEADER + 128)
static DEFINE_SPINLOCK(offload_lock);
-struct list_head offload_base __read_mostly = LIST_HEAD_INIT(offload_base);
-/* Maximum number of GRO_NORMAL skbs to batch up for list-RX */
-int gro_normal_batch __read_mostly = 8;
/**
* dev_add_offload - register offload handlers
@@ -31,7 +28,7 @@ void dev_add_offload(struct packet_offload *po)
struct packet_offload *elem;
spin_lock(&offload_lock);
- list_for_each_entry(elem, &offload_base, list) {
+ list_for_each_entry(elem, &net_hotdata.offload_base, list) {
if (po->priority < elem->priority)
break;
}
@@ -55,7 +52,7 @@ EXPORT_SYMBOL(dev_add_offload);
*/
static void __dev_remove_offload(struct packet_offload *po)
{
- struct list_head *head = &offload_base;
+ struct list_head *head = &net_hotdata.offload_base;
struct packet_offload *po1;
spin_lock(&offload_lock);
@@ -235,9 +232,9 @@ done:
static void napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb)
{
+ struct list_head *head = &net_hotdata.offload_base;
struct packet_offload *ptype;
__be16 type = skb->protocol;
- struct list_head *head = &offload_base;
int err = -ENOENT;
BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));
@@ -369,15 +366,21 @@ static void gro_list_prepare(const struct list_head *head,
static inline void skb_gro_reset_offset(struct sk_buff *skb, u32 nhoff)
{
- const struct skb_shared_info *pinfo = skb_shinfo(skb);
- const skb_frag_t *frag0 = &pinfo->frags[0];
+ const struct skb_shared_info *pinfo;
+ const skb_frag_t *frag0;
+ unsigned int headlen;
NAPI_GRO_CB(skb)->data_offset = 0;
- NAPI_GRO_CB(skb)->frag0 = NULL;
- NAPI_GRO_CB(skb)->frag0_len = 0;
+ headlen = skb_headlen(skb);
+ NAPI_GRO_CB(skb)->frag0 = skb->data;
+ NAPI_GRO_CB(skb)->frag0_len = headlen;
+ if (headlen)
+ return;
+
+ pinfo = skb_shinfo(skb);
+ frag0 = &pinfo->frags[0];
- if (!skb_headlen(skb) && pinfo->nr_frags &&
- !PageHighMem(skb_frag_page(frag0)) &&
+ if (pinfo->nr_frags && !PageHighMem(skb_frag_page(frag0)) &&
(!NET_IP_ALIGN || !((skb_frag_off(frag0) + nhoff) & 3))) {
NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
@@ -438,7 +441,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
{
u32 bucket = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1);
struct gro_list *gro_list = &napi->gro_hash[bucket];
- struct list_head *head = &offload_base;
+ struct list_head *head = &net_hotdata.offload_base;
struct packet_offload *ptype;
__be16 type = skb->protocol;
struct sk_buff *pp = NULL;
@@ -544,7 +547,7 @@ normal:
struct packet_offload *gro_find_receive_by_type(__be16 type)
{
- struct list_head *offload_head = &offload_base;
+ struct list_head *offload_head = &net_hotdata.offload_base;
struct packet_offload *ptype;
list_for_each_entry_rcu(ptype, offload_head, list) {
@@ -558,7 +561,7 @@ EXPORT_SYMBOL(gro_find_receive_by_type);
struct packet_offload *gro_find_complete_by_type(__be16 type)
{
- struct list_head *offload_head = &offload_base;
+ struct list_head *offload_head = &net_hotdata.offload_base;
struct packet_offload *ptype;
list_for_each_entry_rcu(ptype, offload_head, list) {
@@ -700,7 +703,7 @@ static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
skb_reset_mac_header(skb);
skb_gro_reset_offset(skb, hlen);
- if (unlikely(skb_gro_header_hard(skb, hlen))) {
+ if (unlikely(!skb_gro_may_pull(skb, hlen))) {
eth = skb_gro_header_slow(skb, hlen, 0);
if (unlikely(!eth)) {
net_warn_ratelimited("%s: dropping impossible skb from %s\n",
@@ -710,7 +713,10 @@ static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
}
} else {
eth = (const struct ethhdr *)skb->data;
- gro_pull_from_frag0(skb, hlen);
+
+ if (NAPI_GRO_CB(skb)->frag0 != skb->data)
+ gro_pull_from_frag0(skb, hlen);
+
NAPI_GRO_CB(skb)->frag0 += hlen;
NAPI_GRO_CB(skb)->frag0_len -= hlen;
}
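With frag0 now also covering the linear area, skb_gro_header() resolves a header pointer through one uniform fast path. A minimal sketch of an offload ->gro_receive() callback using it; demo_hdr is a made-up protocol header, not something this patch adds:

struct demo_hdr {
	__be16 id;
	__be16 len;
};

static struct sk_buff *demo_gro_receive(struct list_head *head,
					struct sk_buff *skb)
{
	unsigned int off = skb_gro_offset(skb);
	unsigned int hlen = off + sizeof(struct demo_hdr);
	const struct demo_hdr *dh;

	/* Served from NAPI_GRO_CB(skb)->frag0 when it spans hlen,
	 * falling back to the slow pull otherwise. */
	dh = skb_gro_header(skb, hlen, off);
	if (unlikely(!dh))
		return NULL;

	skb_gro_pull(skb, sizeof(struct demo_hdr));
	/* ... match against 'head' and set same_flow/flush ... */
	return NULL;
}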
diff --git a/net/core/gro_cells.c b/net/core/gro_cells.c
index ed5ec5de47f6..ff8e5b64bf6b 100644
--- a/net/core/gro_cells.c
+++ b/net/core/gro_cells.c
@@ -3,6 +3,7 @@
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <net/gro_cells.h>
+#include <net/hotdata.h>
struct gro_cell {
struct sk_buff_head napi_skbs;
@@ -26,7 +27,7 @@ int gro_cells_receive(struct gro_cells *gcells, struct sk_buff *skb)
cell = this_cpu_ptr(gcells->cells);
- if (skb_queue_len(&cell->napi_skbs) > READ_ONCE(netdev_max_backlog)) {
+ if (skb_queue_len(&cell->napi_skbs) > READ_ONCE(net_hotdata.max_backlog)) {
drop:
dev_core_stats_rx_dropped_inc(dev);
kfree_skb(skb);
diff --git a/net/core/gso.c b/net/core/gso.c
index 9e1803bfc9c6..bcd156372f4d 100644
--- a/net/core/gso.c
+++ b/net/core/gso.c
@@ -17,7 +17,7 @@ struct sk_buff *skb_eth_gso_segment(struct sk_buff *skb,
struct packet_offload *ptype;
rcu_read_lock();
- list_for_each_entry_rcu(ptype, &offload_base, list) {
+ list_for_each_entry_rcu(ptype, &net_hotdata.offload_base, list) {
if (ptype->type == type && ptype->callbacks.gso_segment) {
segs = ptype->callbacks.gso_segment(skb, features);
break;
@@ -48,7 +48,7 @@ struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
__skb_pull(skb, vlan_depth);
rcu_read_lock();
- list_for_each_entry_rcu(ptype, &offload_base, list) {
+ list_for_each_entry_rcu(ptype, &net_hotdata.offload_base, list) {
if (ptype->type == type && ptype->callbacks.gso_segment) {
segs = ptype->callbacks.gso_segment(skb, features);
break;
diff --git a/net/core/hotdata.c b/net/core/hotdata.c
new file mode 100644
index 000000000000..c8a7a451c18a
--- /dev/null
+++ b/net/core/hotdata.c
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+#include <net/hotdata.h>
+#include <linux/cache.h>
+#include <linux/jiffies.h>
+#include <linux/list.h>
+
+
+struct net_hotdata net_hotdata __cacheline_aligned = {
+ .offload_base = LIST_HEAD_INIT(net_hotdata.offload_base),
+ .ptype_all = LIST_HEAD_INIT(net_hotdata.ptype_all),
+ .gro_normal_batch = 8,
+
+ .netdev_budget = 300,
+ /* Must be at least 2 jiffies to guarantee 1 jiffy timeout */
+ .netdev_budget_usecs = 2 * USEC_PER_SEC / HZ,
+
+ .tstamp_prequeue = 1,
+ .max_backlog = 1000,
+ .dev_tx_weight = 64,
+ .dev_rx_weight = 64,
+};
+EXPORT_SYMBOL(net_hotdata);
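The structure itself comes from the new <net/hotdata.h>. Its approximate shape, reconstructed from the initializer above and the call sites converted in this series (field types are a best guess, not copied from the header):

struct net_hotdata {
	struct list_head	offload_base;
	struct list_head	ptype_all;
	struct kmem_cache	*skbuff_cache;
	struct kmem_cache	*skbuff_fclone_cache;
	struct kmem_cache	*skb_small_head_cache;
	int			gro_normal_batch;
	int			netdev_budget;
	unsigned int		netdev_budget_usecs;
	int			tstamp_prequeue;
	int			max_backlog;
	int			dev_tx_weight;
	int			dev_rx_weight;
};

extern struct net_hotdata net_hotdata;

Consumers then read, e.g., READ_ONCE(net_hotdata.max_backlog) instead of the old scattered globals, so the hottest read-mostly fields share a few cache lines.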
diff --git a/net/core/link_watch.c b/net/core/link_watch.c
index 429571c258da..8ec35194bfcb 100644
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
@@ -33,7 +33,7 @@ static DECLARE_DELAYED_WORK(linkwatch_work, linkwatch_event);
static LIST_HEAD(lweventlist);
static DEFINE_SPINLOCK(lweventlist_lock);
-static unsigned char default_operstate(const struct net_device *dev)
+static unsigned int default_operstate(const struct net_device *dev)
{
if (netif_testing(dev))
return IF_OPER_TESTING;
@@ -62,16 +62,13 @@ static unsigned char default_operstate(const struct net_device *dev)
return IF_OPER_UP;
}
-
static void rfc2863_policy(struct net_device *dev)
{
- unsigned char operstate = default_operstate(dev);
+ unsigned int operstate = default_operstate(dev);
- if (operstate == dev->operstate)
+ if (operstate == READ_ONCE(dev->operstate))
return;
- write_lock(&dev_base_lock);
-
switch(dev->link_mode) {
case IF_LINK_MODE_TESTING:
if (operstate == IF_OPER_UP)
@@ -87,9 +84,7 @@ static void rfc2863_policy(struct net_device *dev)
break;
}
- dev->operstate = operstate;
-
- write_unlock(&dev_base_lock);
+ WRITE_ONCE(dev->operstate, operstate);
}
diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c
index 09f7ed1a04e8..a97eceb84e61 100644
--- a/net/core/net-procfs.c
+++ b/net/core/net-procfs.c
@@ -3,52 +3,22 @@
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/wext.h>
+#include <net/hotdata.h>
#include "dev.h"
-#define BUCKET_SPACE (32 - NETDEV_HASHBITS - 1)
-
-#define get_bucket(x) ((x) >> BUCKET_SPACE)
-#define get_offset(x) ((x) & ((1 << BUCKET_SPACE) - 1))
-#define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
-
-static inline struct net_device *dev_from_same_bucket(struct seq_file *seq, loff_t *pos)
+static void *dev_seq_from_index(struct seq_file *seq, loff_t *pos)
{
- struct net *net = seq_file_net(seq);
+ unsigned long ifindex = *pos;
struct net_device *dev;
- struct hlist_head *h;
- unsigned int count = 0, offset = get_offset(*pos);
- h = &net->dev_index_head[get_bucket(*pos)];
- hlist_for_each_entry_rcu(dev, h, index_hlist) {
- if (++count == offset)
- return dev;
+ for_each_netdev_dump(seq_file_net(seq), dev, ifindex) {
+ *pos = dev->ifindex;
+ return dev;
}
-
- return NULL;
-}
-
-static inline struct net_device *dev_from_bucket(struct seq_file *seq, loff_t *pos)
-{
- struct net_device *dev;
- unsigned int bucket;
-
- do {
- dev = dev_from_same_bucket(seq, pos);
- if (dev)
- return dev;
-
- bucket = get_bucket(*pos) + 1;
- *pos = set_bucket_offset(bucket, 1);
- } while (bucket < NETDEV_HASHENTRIES);
-
return NULL;
}
-/*
- * This is invoked by the /proc filesystem handler to display a device
- * in detail.
- */
static void *dev_seq_start(struct seq_file *seq, loff_t *pos)
__acquires(RCU)
{
@@ -56,16 +26,13 @@ static void *dev_seq_start(struct seq_file *seq, loff_t *pos)
if (!*pos)
return SEQ_START_TOKEN;
- if (get_bucket(*pos) >= NETDEV_HASHENTRIES)
- return NULL;
-
- return dev_from_bucket(seq, pos);
+ return dev_seq_from_index(seq, pos);
}
static void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
++*pos;
- return dev_from_bucket(seq, pos);
+ return dev_seq_from_index(seq, pos);
}
static void dev_seq_stop(struct seq_file *seq, void *v)
@@ -217,7 +184,7 @@ static void *ptype_get_idx(struct seq_file *seq, loff_t pos)
}
}
- list_for_each_entry_rcu(pt, &ptype_all, list) {
+ list_for_each_entry_rcu(pt, &net_hotdata.ptype_all, list) {
if (i == pos)
return pt;
++i;
@@ -265,13 +232,13 @@ static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
}
}
- nxt = ptype_all.next;
+ nxt = net_hotdata.ptype_all.next;
goto ptype_all;
}
if (pt->type == htons(ETH_P_ALL)) {
ptype_all:
- if (nxt != &ptype_all)
+ if (nxt != &net_hotdata.ptype_all)
goto found;
hash = 0;
nxt = ptype_base[0].next;
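for_each_netdev_dump() is the ifindex-keyed iterator replacing the hash-bucket cursor: the seq_file position is simply the last ifindex emitted, so a restarted read resumes at the next device even if the list changed in between. Its definition is approximately the following (paraphrased, assuming the per-netns dev_by_index xarray):

#define for_each_netdev_dump(net, d, ifindex)				\
	xa_for_each_start(&(net)->dev_by_index, (ifindex), (d), (ifindex))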
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index a09d507c5b03..e3d7a8cfa20b 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -24,6 +24,7 @@
#include <linux/of_net.h>
#include <linux/cpu.h>
#include <net/netdev_rx_queue.h>
+#include <net/rps.h>
#include "dev.h"
#include "net-sysfs.h"
@@ -34,10 +35,10 @@ static const char fmt_dec[] = "%d\n";
static const char fmt_ulong[] = "%lu\n";
static const char fmt_u64[] = "%llu\n";
-/* Caller holds RTNL or dev_base_lock */
+/* Caller holds RTNL or RCU */
static inline int dev_isalive(const struct net_device *dev)
{
- return dev->reg_state <= NETREG_REGISTERED;
+ return READ_ONCE(dev->reg_state) <= NETREG_REGISTERED;
}
/* use same locking rules as GIF* ioctl's */
@@ -48,10 +49,10 @@ static ssize_t netdev_show(const struct device *dev,
struct net_device *ndev = to_net_dev(dev);
ssize_t ret = -EINVAL;
- read_lock(&dev_base_lock);
+ rcu_read_lock();
if (dev_isalive(ndev))
ret = (*format)(ndev, buf);
- read_unlock(&dev_base_lock);
+ rcu_read_unlock();
return ret;
}
@@ -60,7 +61,7 @@ static ssize_t netdev_show(const struct device *dev,
#define NETDEVICE_SHOW(field, format_string) \
static ssize_t format_##field(const struct net_device *dev, char *buf) \
{ \
- return sysfs_emit(buf, format_string, dev->field); \
+ return sysfs_emit(buf, format_string, READ_ONCE(dev->field)); \
} \
static ssize_t field##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
@@ -125,7 +126,7 @@ static DEVICE_ATTR_RO(iflink);
static ssize_t format_name_assign_type(const struct net_device *dev, char *buf)
{
- return sysfs_emit(buf, fmt_dec, dev->name_assign_type);
+ return sysfs_emit(buf, fmt_dec, READ_ONCE(dev->name_assign_type));
}
static ssize_t name_assign_type_show(struct device *dev,
@@ -135,24 +136,28 @@ static ssize_t name_assign_type_show(struct device *dev,
struct net_device *ndev = to_net_dev(dev);
ssize_t ret = -EINVAL;
- if (ndev->name_assign_type != NET_NAME_UNKNOWN)
+ if (READ_ONCE(ndev->name_assign_type) != NET_NAME_UNKNOWN)
ret = netdev_show(dev, attr, buf, format_name_assign_type);
return ret;
}
static DEVICE_ATTR_RO(name_assign_type);
-/* use same locking rules as GIFHWADDR ioctl's */
+/* use same locking rules as GIFHWADDR ioctl's (dev_get_mac_address()) */
static ssize_t address_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct net_device *ndev = to_net_dev(dev);
ssize_t ret = -EINVAL;
- read_lock(&dev_base_lock);
+ down_read(&dev_addr_sem);
+
+ rcu_read_lock();
if (dev_isalive(ndev))
ret = sysfs_format_mac(buf, ndev->dev_addr, ndev->addr_len);
- read_unlock(&dev_base_lock);
+ rcu_read_unlock();
+
+ up_read(&dev_addr_sem);
return ret;
}
static DEVICE_ATTR_RO(address);
@@ -161,10 +166,13 @@ static ssize_t broadcast_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct net_device *ndev = to_net_dev(dev);
+ int ret = -EINVAL;
+ rcu_read_lock();
if (dev_isalive(ndev))
- return sysfs_format_mac(buf, ndev->broadcast, ndev->addr_len);
- return -EINVAL;
+ ret = sysfs_format_mac(buf, ndev->broadcast, ndev->addr_len);
+ rcu_read_unlock();
+ return ret;
}
static DEVICE_ATTR_RO(broadcast);
@@ -318,11 +326,9 @@ static ssize_t operstate_show(struct device *dev,
const struct net_device *netdev = to_net_dev(dev);
unsigned char operstate;
- read_lock(&dev_base_lock);
- operstate = netdev->operstate;
+ operstate = READ_ONCE(netdev->operstate);
if (!netif_running(netdev))
operstate = IF_OPER_DOWN;
- read_unlock(&dev_base_lock);
if (operstate >= ARRAY_SIZE(operstates))
return -EINVAL; /* should not happen */
@@ -680,14 +686,14 @@ static ssize_t netstat_show(const struct device *d,
WARN_ON(offset > sizeof(struct rtnl_link_stats64) ||
offset % sizeof(u64) != 0);
- read_lock(&dev_base_lock);
+ rcu_read_lock();
if (dev_isalive(dev)) {
struct rtnl_link_stats64 temp;
const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
ret = sysfs_emit(buf, fmt_u64, *(u64 *)(((u8 *)stats) + offset));
}
- read_unlock(&dev_base_lock);
+ rcu_read_unlock();
return ret;
}
@@ -1409,6 +1415,65 @@ static struct netdev_queue_attribute bql_hold_time_attribute __ro_after_init
= __ATTR(hold_time, 0644,
bql_show_hold_time, bql_set_hold_time);
+static ssize_t bql_show_stall_thrs(struct netdev_queue *queue, char *buf)
+{
+ struct dql *dql = &queue->dql;
+
+ return sprintf(buf, "%u\n", jiffies_to_msecs(dql->stall_thrs));
+}
+
+static ssize_t bql_set_stall_thrs(struct netdev_queue *queue,
+ const char *buf, size_t len)
+{
+ struct dql *dql = &queue->dql;
+ unsigned int value;
+ int err;
+
+ err = kstrtouint(buf, 10, &value);
+ if (err < 0)
+ return err;
+
+ value = msecs_to_jiffies(value);
+ if (value && (value < 4 || value > 4 / 2 * BITS_PER_LONG))
+ return -ERANGE;
+
+ if (!dql->stall_thrs && value)
+ dql->last_reap = jiffies;
+ /* Force last_reap to be live */
+ smp_wmb();
+ dql->stall_thrs = value;
+
+ return len;
+}
+
+static struct netdev_queue_attribute bql_stall_thrs_attribute __ro_after_init =
+ __ATTR(stall_thrs, 0644, bql_show_stall_thrs, bql_set_stall_thrs);
+
+static ssize_t bql_show_stall_max(struct netdev_queue *queue, char *buf)
+{
+ return sprintf(buf, "%u\n", READ_ONCE(queue->dql.stall_max));
+}
+
+static ssize_t bql_set_stall_max(struct netdev_queue *queue,
+ const char *buf, size_t len)
+{
+ WRITE_ONCE(queue->dql.stall_max, 0);
+ return len;
+}
+
+static struct netdev_queue_attribute bql_stall_max_attribute __ro_after_init =
+ __ATTR(stall_max, 0644, bql_show_stall_max, bql_set_stall_max);
+
+static ssize_t bql_show_stall_cnt(struct netdev_queue *queue, char *buf)
+{
+ struct dql *dql = &queue->dql;
+
+ return sprintf(buf, "%lu\n", dql->stall_cnt);
+}
+
+static struct netdev_queue_attribute bql_stall_cnt_attribute __ro_after_init =
+ __ATTR(stall_cnt, 0444, bql_show_stall_cnt, NULL);
+
static ssize_t bql_show_inflight(struct netdev_queue *queue,
char *buf)
{
@@ -1447,6 +1512,9 @@ static struct attribute *dql_attrs[] __ro_after_init = {
&bql_limit_min_attribute.attr,
&bql_hold_time_attribute.attr,
&bql_inflight_attribute.attr,
+ &bql_stall_thrs_attribute.attr,
+ &bql_stall_cnt_attribute.attr,
+ &bql_stall_max_attribute.attr,
NULL
};
@@ -1454,6 +1522,9 @@ static const struct attribute_group dql_group = {
.name = "byte_queue_limits",
.attrs = dql_attrs,
};
+#else
+/* Fake declaration, all the code using it should be dead */
+extern const struct attribute_group dql_group;
#endif /* CONFIG_BQL */
#ifdef CONFIG_XPS
@@ -1691,6 +1762,15 @@ static const struct kobj_type netdev_queue_ktype = {
.get_ownership = netdev_queue_get_ownership,
};
+static bool netdev_uses_bql(const struct net_device *dev)
+{
+ if (dev->features & NETIF_F_LLTX ||
+ dev->priv_flags & IFF_NO_QUEUE)
+ return false;
+
+ return IS_ENABLED(CONFIG_BQL);
+}
+
static int netdev_queue_add_kobject(struct net_device *dev, int index)
{
struct netdev_queue *queue = dev->_tx + index;
@@ -1708,11 +1788,11 @@ static int netdev_queue_add_kobject(struct net_device *dev, int index)
if (error)
goto err;
-#ifdef CONFIG_BQL
- error = sysfs_create_group(kobj, &dql_group);
- if (error)
- goto err;
-#endif
+ if (netdev_uses_bql(dev)) {
+ error = sysfs_create_group(kobj, &dql_group);
+ if (error)
+ goto err;
+ }
kobject_uevent(kobj, KOBJ_ADD);
return 0;
@@ -1733,9 +1813,9 @@ static int tx_queue_change_owner(struct net_device *ndev, int index,
if (error)
return error;
-#ifdef CONFIG_BQL
- error = sysfs_group_change_owner(kobj, &dql_group, kuid, kgid);
-#endif
+ if (netdev_uses_bql(ndev))
+ error = sysfs_group_change_owner(kobj, &dql_group, kuid, kgid);
+
return error;
}
#endif /* CONFIG_SYSFS */
@@ -1767,9 +1847,10 @@ netdev_queue_update_kobjects(struct net_device *dev, int old_num, int new_num)
if (!refcount_read(&dev_net(dev)->ns.count))
queue->kobj.uevent_suppress = 1;
-#ifdef CONFIG_BQL
- sysfs_remove_group(&queue->kobj, &dql_group);
-#endif
+
+ if (netdev_uses_bql(dev))
+ sysfs_remove_group(&queue->kobj, &dql_group);
+
kobject_put(&queue->kobj);
}
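The three new attributes land next to the existing BQL knobs under queues/tx-<n>/byte_queue_limits/. A userspace sketch, assuming eth0 and queue tx-0 (paths follow the group name above; error handling trimmed):

#include <stdio.h>

int main(void)
{
	const char *base = "/sys/class/net/eth0/queues/tx-0/byte_queue_limits";
	char path[256];
	unsigned int v;
	FILE *f;

	/* Arm the detector: flag completion stalls longer than 8 ms. */
	snprintf(path, sizeof(path), "%s/stall_thrs", base);
	f = fopen(path, "w");
	if (!f)
		return 1;
	fprintf(f, "8\n");
	fclose(f);

	/* Read back the worst stall observed so far. */
	snprintf(path, sizeof(path), "%s/stall_max", base);
	f = fopen(path, "r");
	if (f && fscanf(f, "%u", &v) == 1)
		printf("stall_max: %u\n", v);
	if (f)
		fclose(f);
	return 0;
}

Writing anything to stall_max resets it to zero, per bql_set_stall_max() above.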
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 72799533426b..f0540c557515 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -318,8 +318,9 @@ static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
{
/* Must be called with pernet_ops_rwsem held */
const struct pernet_operations *ops, *saved_ops;
- int error = 0;
LIST_HEAD(net_exit_list);
+ LIST_HEAD(dev_kill_list);
+ int error = 0;
refcount_set(&net->ns.count, 1);
ref_tracker_dir_init(&net->refcnt_tracker, 128, "net refcnt");
@@ -358,6 +359,15 @@ out_undo:
synchronize_rcu();
ops = saved_ops;
+ rtnl_lock();
+ list_for_each_entry_continue_reverse(ops, &pernet_list, list) {
+ if (ops->exit_batch_rtnl)
+ ops->exit_batch_rtnl(&net_exit_list, &dev_kill_list);
+ }
+ unregister_netdevice_many(&dev_kill_list);
+ rtnl_unlock();
+
+ ops = saved_ops;
list_for_each_entry_continue_reverse(ops, &pernet_list, list)
ops_exit_list(ops, &net_exit_list);
@@ -573,6 +583,7 @@ static void cleanup_net(struct work_struct *work)
struct net *net, *tmp, *last;
struct llist_node *net_kill_list;
LIST_HEAD(net_exit_list);
+ LIST_HEAD(dev_kill_list);
/* Atomically snapshot the list of namespaces to cleanup */
net_kill_list = llist_del_all(&cleanup_list);
@@ -611,7 +622,15 @@ static void cleanup_net(struct work_struct *work)
* the rcu_barrier() below isn't sufficient alone.
* Also the pre_exit() and exit() methods need this barrier.
*/
- synchronize_rcu();
+ synchronize_rcu_expedited();
+
+ rtnl_lock();
+ list_for_each_entry_reverse(ops, &pernet_list, list) {
+ if (ops->exit_batch_rtnl)
+ ops->exit_batch_rtnl(&net_exit_list, &dev_kill_list);
+ }
+ unregister_netdevice_many(&dev_kill_list);
+ rtnl_unlock();
/* Run all of the network namespace exit methods */
list_for_each_entry_reverse(ops, &pernet_list, list)
@@ -1193,7 +1212,17 @@ static void free_exit_list(struct pernet_operations *ops, struct list_head *net_
{
ops_pre_exit_list(ops, net_exit_list);
synchronize_rcu();
+
+ if (ops->exit_batch_rtnl) {
+ LIST_HEAD(dev_kill_list);
+
+ rtnl_lock();
+ ops->exit_batch_rtnl(net_exit_list, &dev_kill_list);
+ unregister_netdevice_many(&dev_kill_list);
+ rtnl_unlock();
+ }
ops_exit_list(ops, net_exit_list);
+
ops_free_list(ops, net_exit_list);
}
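Drivers that used to grab the RTNL themselves in ->exit_batch() can instead queue their dying devices from the new hook and let the core issue one unregister_netdevice_many() under a single rtnl_lock(). A sketch for a hypothetical tunnel driver; the demo_* names are illustrative:

static struct rtnl_link_ops demo_link_ops;	/* this driver's link ops */

static void __net_exit demo_exit_batch_rtnl(struct list_head *net_list,
					    struct list_head *dev_kill_list)
{
	struct net_device *dev, *aux;
	struct net *net;

	ASSERT_RTNL();
	list_for_each_entry(net, net_list, exit_list)
		for_each_netdev_safe(net, dev, aux)
			if (dev->rtnl_link_ops == &demo_link_ops)
				unregister_netdevice_queue(dev, dev_kill_list);
}

static struct pernet_operations demo_net_ops = {
	.exit_batch_rtnl = demo_exit_batch_rtnl,
	/* .init / .exit unchanged */
};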
diff --git a/net/core/netdev-genl-gen.c b/net/core/netdev-genl-gen.c
index be7f2ebd61b2..8d8ace9ef87f 100644
--- a/net/core/netdev-genl-gen.c
+++ b/net/core/netdev-genl-gen.c
@@ -68,6 +68,11 @@ static const struct nla_policy netdev_napi_get_dump_nl_policy[NETDEV_A_NAPI_IFIN
[NETDEV_A_NAPI_IFINDEX] = NLA_POLICY_MIN(NLA_U32, 1),
};
+/* NETDEV_CMD_QSTATS_GET - dump */
+static const struct nla_policy netdev_qstats_get_nl_policy[NETDEV_A_QSTATS_SCOPE + 1] = {
+ [NETDEV_A_QSTATS_SCOPE] = NLA_POLICY_MASK(NLA_UINT, 0x1),
+};
+
/* Ops table for netdev */
static const struct genl_split_ops netdev_nl_ops[] = {
{
@@ -138,6 +143,13 @@ static const struct genl_split_ops netdev_nl_ops[] = {
.maxattr = NETDEV_A_NAPI_IFINDEX,
.flags = GENL_CMD_CAP_DUMP,
},
+ {
+ .cmd = NETDEV_CMD_QSTATS_GET,
+ .dumpit = netdev_nl_qstats_get_dumpit,
+ .policy = netdev_qstats_get_nl_policy,
+ .maxattr = NETDEV_A_QSTATS_SCOPE,
+ .flags = GENL_CMD_CAP_DUMP,
+ },
};
static const struct genl_multicast_group netdev_nl_mcgrps[] = {
diff --git a/net/core/netdev-genl-gen.h b/net/core/netdev-genl-gen.h
index a47f2bcbe4fa..4db40fd5b4a9 100644
--- a/net/core/netdev-genl-gen.h
+++ b/net/core/netdev-genl-gen.h
@@ -28,6 +28,8 @@ int netdev_nl_queue_get_dumpit(struct sk_buff *skb,
struct netlink_callback *cb);
int netdev_nl_napi_get_doit(struct sk_buff *skb, struct genl_info *info);
int netdev_nl_napi_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb);
+int netdev_nl_qstats_get_dumpit(struct sk_buff *skb,
+ struct netlink_callback *cb);
enum {
NETDEV_NLGRP_MGMT,
diff --git a/net/core/netdev-genl.c b/net/core/netdev-genl.c
index fd98936da3ae..7004b3399c2b 100644
--- a/net/core/netdev-genl.c
+++ b/net/core/netdev-genl.c
@@ -8,6 +8,7 @@
#include <net/xdp.h>
#include <net/xdp_sock.h>
#include <net/netdev_rx_queue.h>
+#include <net/netdev_queues.h>
#include <net/busy_poll.h>
#include "netdev-genl-gen.h"
@@ -152,10 +153,7 @@ int netdev_nl_dev_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
}
rtnl_unlock();
- if (err != -EMSGSIZE)
- return err;
-
- return skb->len;
+ return err;
}
static int
@@ -287,10 +285,7 @@ int netdev_nl_napi_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
}
rtnl_unlock();
- if (err != -EMSGSIZE)
- return err;
-
- return skb->len;
+ return err;
}
static int
@@ -463,10 +458,220 @@ int netdev_nl_queue_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
}
rtnl_unlock();
- if (err != -EMSGSIZE)
- return err;
+ return err;
+}
+
+#define NETDEV_STAT_NOT_SET (~0ULL)
+
+static void netdev_nl_stats_add(void *_sum, const void *_add, size_t size)
+{
+ const u64 *add = _add;
+ u64 *sum = _sum;
+
+ while (size) {
+ if (*add != NETDEV_STAT_NOT_SET && *sum != NETDEV_STAT_NOT_SET)
+ *sum += *add;
+ sum++;
+ add++;
+ size -= 8;
+ }
+}
+
+static int netdev_stat_put(struct sk_buff *rsp, unsigned int attr_id, u64 value)
+{
+ if (value == NETDEV_STAT_NOT_SET)
+ return 0;
+ return nla_put_uint(rsp, attr_id, value);
+}
+
+static int
+netdev_nl_stats_write_rx(struct sk_buff *rsp, struct netdev_queue_stats_rx *rx)
+{
+ if (netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_PACKETS, rx->packets) ||
+ netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_BYTES, rx->bytes) ||
+ netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_ALLOC_FAIL, rx->alloc_fail))
+ return -EMSGSIZE;
+ return 0;
+}
+
+static int
+netdev_nl_stats_write_tx(struct sk_buff *rsp, struct netdev_queue_stats_tx *tx)
+{
+ if (netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_PACKETS, tx->packets) ||
+ netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_BYTES, tx->bytes))
+ return -EMSGSIZE;
+ return 0;
+}
+
+static int
+netdev_nl_stats_queue(struct net_device *netdev, struct sk_buff *rsp,
+ u32 q_type, int i, const struct genl_info *info)
+{
+ const struct netdev_stat_ops *ops = netdev->stat_ops;
+ struct netdev_queue_stats_rx rx;
+ struct netdev_queue_stats_tx tx;
+ void *hdr;
+
+ hdr = genlmsg_iput(rsp, info);
+ if (!hdr)
+ return -EMSGSIZE;
+ if (nla_put_u32(rsp, NETDEV_A_QSTATS_IFINDEX, netdev->ifindex) ||
+ nla_put_u32(rsp, NETDEV_A_QSTATS_QUEUE_TYPE, q_type) ||
+ nla_put_u32(rsp, NETDEV_A_QSTATS_QUEUE_ID, i))
+ goto nla_put_failure;
+
+ switch (q_type) {
+ case NETDEV_QUEUE_TYPE_RX:
+ memset(&rx, 0xff, sizeof(rx));
+ ops->get_queue_stats_rx(netdev, i, &rx);
+ if (!memchr_inv(&rx, 0xff, sizeof(rx)))
+ goto nla_cancel;
+ if (netdev_nl_stats_write_rx(rsp, &rx))
+ goto nla_put_failure;
+ break;
+ case NETDEV_QUEUE_TYPE_TX:
+ memset(&tx, 0xff, sizeof(tx));
+ ops->get_queue_stats_tx(netdev, i, &tx);
+ if (!memchr_inv(&tx, 0xff, sizeof(tx)))
+ goto nla_cancel;
+ if (netdev_nl_stats_write_tx(rsp, &tx))
+ goto nla_put_failure;
+ break;
+ }
+
+ genlmsg_end(rsp, hdr);
+ return 0;
+
+nla_cancel:
+ genlmsg_cancel(rsp, hdr);
+ return 0;
+nla_put_failure:
+ genlmsg_cancel(rsp, hdr);
+ return -EMSGSIZE;
+}
+
+static int
+netdev_nl_stats_by_queue(struct net_device *netdev, struct sk_buff *rsp,
+ const struct genl_info *info,
+ struct netdev_nl_dump_ctx *ctx)
+{
+ const struct netdev_stat_ops *ops = netdev->stat_ops;
+ int i, err;
+
+ if (!(netdev->flags & IFF_UP))
+ return 0;
+
+ i = ctx->rxq_idx;
+ while (ops->get_queue_stats_rx && i < netdev->real_num_rx_queues) {
+ err = netdev_nl_stats_queue(netdev, rsp, NETDEV_QUEUE_TYPE_RX,
+ i, info);
+ if (err)
+ return err;
+ ctx->rxq_idx = i++;
+ }
+ i = ctx->txq_idx;
+ while (ops->get_queue_stats_tx && i < netdev->real_num_tx_queues) {
+ err = netdev_nl_stats_queue(netdev, rsp, NETDEV_QUEUE_TYPE_TX,
+ i, info);
+ if (err)
+ return err;
+ ctx->txq_idx = i++;
+ }
+
+ ctx->rxq_idx = 0;
+ ctx->txq_idx = 0;
+ return 0;
+}
+
+static int
+netdev_nl_stats_by_netdev(struct net_device *netdev, struct sk_buff *rsp,
+ const struct genl_info *info)
+{
+ struct netdev_queue_stats_rx rx_sum, rx;
+ struct netdev_queue_stats_tx tx_sum, tx;
+ const struct netdev_stat_ops *ops;
+ void *hdr;
+ int i;
+
+ ops = netdev->stat_ops;
+ /* Netdev can't guarantee any complete counters */
+ if (!ops->get_base_stats)
+ return 0;
+
+ memset(&rx_sum, 0xff, sizeof(rx_sum));
+ memset(&tx_sum, 0xff, sizeof(tx_sum));
+
+ ops->get_base_stats(netdev, &rx_sum, &tx_sum);
+
+ /* The op was there, but nothing reported, don't bother */
+ if (!memchr_inv(&rx_sum, 0xff, sizeof(rx_sum)) &&
+ !memchr_inv(&tx_sum, 0xff, sizeof(tx_sum)))
+ return 0;
+
+ hdr = genlmsg_iput(rsp, info);
+ if (!hdr)
+ return -EMSGSIZE;
+ if (nla_put_u32(rsp, NETDEV_A_QSTATS_IFINDEX, netdev->ifindex))
+ goto nla_put_failure;
+
+ for (i = 0; i < netdev->real_num_rx_queues; i++) {
+ memset(&rx, 0xff, sizeof(rx));
+ if (ops->get_queue_stats_rx)
+ ops->get_queue_stats_rx(netdev, i, &rx);
+ netdev_nl_stats_add(&rx_sum, &rx, sizeof(rx));
+ }
+ for (i = 0; i < netdev->real_num_tx_queues; i++) {
+ memset(&tx, 0xff, sizeof(tx));
+ if (ops->get_queue_stats_tx)
+ ops->get_queue_stats_tx(netdev, i, &tx);
+ netdev_nl_stats_add(&tx_sum, &tx, sizeof(tx));
+ }
+
+ if (netdev_nl_stats_write_rx(rsp, &rx_sum) ||
+ netdev_nl_stats_write_tx(rsp, &tx_sum))
+ goto nla_put_failure;
+
+ genlmsg_end(rsp, hdr);
+ return 0;
+
+nla_put_failure:
+ genlmsg_cancel(rsp, hdr);
+ return -EMSGSIZE;
+}
+
+int netdev_nl_qstats_get_dumpit(struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb);
+ const struct genl_info *info = genl_info_dump(cb);
+ struct net *net = sock_net(skb->sk);
+ struct net_device *netdev;
+ unsigned int scope;
+ int err = 0;
+
+ scope = 0;
+ if (info->attrs[NETDEV_A_QSTATS_SCOPE])
+ scope = nla_get_uint(info->attrs[NETDEV_A_QSTATS_SCOPE]);
+
+ rtnl_lock();
+ for_each_netdev_dump(net, netdev, ctx->ifindex) {
+ if (!netdev->stat_ops)
+ continue;
+
+ switch (scope) {
+ case 0:
+ err = netdev_nl_stats_by_netdev(netdev, skb, info);
+ break;
+ case NETDEV_QSTATS_SCOPE_QUEUE:
+ err = netdev_nl_stats_by_queue(netdev, skb, info, ctx);
+ break;
+ }
+ if (err < 0)
+ break;
+ }
+ rtnl_unlock();
- return skb->len;
+ return err;
}
static int netdev_genl_netdevice_event(struct notifier_block *nb,
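On the driver side, the dump walks a new const struct netdev_stat_ops hung off the netdev. A hypothetical implementation; note that the core pre-fills every counter with ~0 ("not set"), so a driver only writes what it actually counts:

struct demo_ring_stats {
	u64 packets;
	u64 bytes;
};

struct demo_priv {
	struct demo_ring_stats rx_ring[8];
};

static void demo_get_queue_stats_rx(struct net_device *dev, int idx,
				    struct netdev_queue_stats_rx *rx)
{
	struct demo_priv *priv = netdev_priv(dev);

	rx->packets = priv->rx_ring[idx].packets;
	rx->bytes = priv->rx_ring[idx].bytes;
	/* rx->alloc_fail left untouched => reported as not set */
}

static void demo_get_base_stats(struct net_device *dev,
				struct netdev_queue_stats_rx *rx,
				struct netdev_queue_stats_tx *tx)
{
	/* Totals for rings that no longer exist, so per-queue sums plus
	 * this base stay consistent across ring reconfiguration. */
	rx->packets = 0;
	rx->bytes = 0;
	tx->packets = 0;
	tx->bytes = 0;
}

static const struct netdev_stat_ops demo_stat_ops = {
	.get_queue_stats_rx	= demo_get_queue_stats_rx,
	.get_base_stats		= demo_get_base_stats,
};

/* at probe time: netdev->stat_ops = &demo_stat_ops; */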
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index 4933762e5a6b..dd364d738c00 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -31,6 +31,8 @@
#define BIAS_MAX (LONG_MAX >> 1)
#ifdef CONFIG_PAGE_POOL_STATS
+static DEFINE_PER_CPU(struct page_pool_recycle_stats, pp_system_recycle_stats);
+
/* alloc_stat_inc is intended to be used in softirq context */
#define alloc_stat_inc(pool, __stat) (pool->alloc_stats.__stat++)
/* recycle_stat_inc is safe to use when preemption is possible. */
@@ -171,13 +173,16 @@ static void page_pool_producer_unlock(struct page_pool *pool,
}
static int page_pool_init(struct page_pool *pool,
- const struct page_pool_params *params)
+ const struct page_pool_params *params,
+ int cpuid)
{
unsigned int ring_qsize = 1024; /* Default */
memcpy(&pool->p, &params->fast, sizeof(pool->p));
memcpy(&pool->slow, &params->slow, sizeof(pool->slow));
+ pool->cpuid = cpuid;
+
/* Validate only known flags were used */
if (pool->p.flags & ~(PP_FLAG_ALL))
return -EINVAL;
@@ -217,14 +222,23 @@ static int page_pool_init(struct page_pool *pool,
pool->has_init_callback = !!pool->slow.init_callback;
#ifdef CONFIG_PAGE_POOL_STATS
- pool->recycle_stats = alloc_percpu(struct page_pool_recycle_stats);
- if (!pool->recycle_stats)
- return -ENOMEM;
+ if (!(pool->p.flags & PP_FLAG_SYSTEM_POOL)) {
+ pool->recycle_stats = alloc_percpu(struct page_pool_recycle_stats);
+ if (!pool->recycle_stats)
+ return -ENOMEM;
+ } else {
+ /* For system page pool instance we use a singular stats object
+ * instead of allocating a separate percpu variable for each
+ * (also percpu) page pool instance.
+ */
+ pool->recycle_stats = &pp_system_recycle_stats;
+ }
#endif
if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0) {
#ifdef CONFIG_PAGE_POOL_STATS
- free_percpu(pool->recycle_stats);
+ if (!(pool->p.flags & PP_FLAG_SYSTEM_POOL))
+ free_percpu(pool->recycle_stats);
#endif
return -ENOMEM;
}
@@ -248,15 +262,18 @@ static void page_pool_uninit(struct page_pool *pool)
put_device(pool->p.dev);
#ifdef CONFIG_PAGE_POOL_STATS
- free_percpu(pool->recycle_stats);
+ if (!(pool->p.flags & PP_FLAG_SYSTEM_POOL))
+ free_percpu(pool->recycle_stats);
#endif
}
/**
- * page_pool_create() - create a page pool.
+ * page_pool_create_percpu() - create a page pool for a given cpu.
* @params: parameters, see struct page_pool_params
+ * @cpuid: cpu identifier
*/
-struct page_pool *page_pool_create(const struct page_pool_params *params)
+struct page_pool *
+page_pool_create_percpu(const struct page_pool_params *params, int cpuid)
{
struct page_pool *pool;
int err;
@@ -265,7 +282,7 @@ struct page_pool *page_pool_create(const struct page_pool_params *params)
if (!pool)
return ERR_PTR(-ENOMEM);
- err = page_pool_init(pool, params);
+ err = page_pool_init(pool, params, cpuid);
if (err < 0)
goto err_free;
@@ -282,6 +299,16 @@ err_free:
kfree(pool);
return ERR_PTR(err);
}
+EXPORT_SYMBOL(page_pool_create_percpu);
+
+/**
+ * page_pool_create() - create a page pool
+ * @params: parameters, see struct page_pool_params
+ */
+struct page_pool *page_pool_create(const struct page_pool_params *params)
+{
+ return page_pool_create_percpu(params, -1);
+}
EXPORT_SYMBOL(page_pool_create);
static void page_pool_return_page(struct page_pool *pool, struct page *page);
@@ -630,6 +657,11 @@ static bool page_pool_recycle_in_cache(struct page *page,
return true;
}
+static bool __page_pool_page_can_be_recycled(const struct page *page)
+{
+ return page_ref_count(page) == 1 && !page_is_pfmemalloc(page);
+}
+
/* If the page refcnt == 1, this will try to recycle the page.
* if PP_FLAG_DMA_SYNC_DEV is set, we'll try to sync the DMA area for
* the configured size min(dma_sync_size, pool->max_len).
@@ -651,7 +683,7 @@ __page_pool_put_page(struct page_pool *pool, struct page *page,
* page is NOT reusable when allocated when system is under
* some pressure. (page_is_pfmemalloc)
*/
- if (likely(page_ref_count(page) == 1 && !page_is_pfmemalloc(page))) {
+ if (likely(__page_pool_page_can_be_recycled(page))) {
/* Read barrier done in page_ref_count / READ_ONCE */
if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
@@ -766,7 +798,7 @@ static struct page *page_pool_drain_frag(struct page_pool *pool,
if (likely(page_pool_unref_page(page, drain_count)))
return NULL;
- if (page_ref_count(page) == 1 && !page_is_pfmemalloc(page)) {
+ if (__page_pool_page_can_be_recycled(page)) {
if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
page_pool_dma_sync_for_device(pool, page, -1);
@@ -934,8 +966,13 @@ void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
pool->xdp_mem_id = mem->id;
}
-void page_pool_unlink_napi(struct page_pool *pool)
+static void page_pool_disable_direct_recycling(struct page_pool *pool)
{
+ /* Disable direct recycling based on pool->cpuid.
+ * Paired with READ_ONCE() in napi_pp_put_page().
+ */
+ WRITE_ONCE(pool->cpuid, -1);
+
if (!pool->p.napi)
return;
@@ -947,7 +984,6 @@ void page_pool_unlink_napi(struct page_pool *pool)
WRITE_ONCE(pool->p.napi, NULL);
}
-EXPORT_SYMBOL(page_pool_unlink_napi);
void page_pool_destroy(struct page_pool *pool)
{
@@ -957,7 +993,7 @@ void page_pool_destroy(struct page_pool *pool)
if (!page_pool_put(pool))
return;
- page_pool_unlink_napi(pool);
+ page_pool_disable_direct_recycling(pool);
page_pool_free_frag(pool);
if (!page_pool_release(pool))
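Callers wanting a pool dedicated to one CPU pass its id; plain page_pool_create() keeps passing -1, which disables the cpuid-based direct-recycling check added to napi_pp_put_page(). An illustrative setup loop, loosely modeled on the per-CPU system pools introduced later in this series (the pool size is a placeholder):

int cpuid;

for_each_possible_cpu(cpuid) {
	struct page_pool_params params = {
		.pool_size	= 256,			/* placeholder */
		.flags		= PP_FLAG_SYSTEM_POOL,	/* shared recycle stats */
		.nid		= NUMA_NO_NODE,
	};
	struct page_pool *pp;

	pp = page_pool_create_percpu(&params, cpuid);
	if (IS_ERR(pp))
		return PTR_ERR(pp);
	/* stash pp in a per-CPU pointer for the softirq fast path */
}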
diff --git a/net/core/page_pool_user.c b/net/core/page_pool_user.c
index 278294aca66a..3a3277ba167b 100644
--- a/net/core/page_pool_user.c
+++ b/net/core/page_pool_user.c
@@ -103,8 +103,6 @@ out:
mutex_unlock(&page_pools_lock);
rtnl_unlock();
- if (skb->len && err == -EMSGSIZE)
- return skb->len;
return err;
}
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index bd50e9fe3234..a3d7847ce69d 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -483,24 +483,15 @@ EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
*/
static void rtnl_lock_unregistering_all(void)
{
- struct net *net;
- bool unregistering;
DEFINE_WAIT_FUNC(wait, woken_wake_function);
add_wait_queue(&netdev_unregistering_wq, &wait);
for (;;) {
- unregistering = false;
rtnl_lock();
/* We held write locked pernet_ops_rwsem, and parallel
* setup_net() and cleanup_net() are not possible.
*/
- for_each_net(net) {
- if (atomic_read(&net->dev_unreg_count) > 0) {
- unregistering = true;
- break;
- }
- }
- if (!unregistering)
+ if (!atomic_read(&dev_unreg_count))
break;
__rtnl_unlock();
@@ -851,9 +842,22 @@ int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id,
}
EXPORT_SYMBOL_GPL(rtnl_put_cacheinfo);
+void netdev_set_operstate(struct net_device *dev, int newstate)
+{
+ unsigned int old = READ_ONCE(dev->operstate);
+
+ do {
+ if (old == newstate)
+ return;
+ } while (!try_cmpxchg(&dev->operstate, &old, newstate));
+
+ netdev_state_change(dev);
+}
+EXPORT_SYMBOL(netdev_set_operstate);
+
static void set_operstate(struct net_device *dev, unsigned char transition)
{
- unsigned char operstate = dev->operstate;
+ unsigned char operstate = READ_ONCE(dev->operstate);
switch (transition) {
case IF_OPER_UP:
@@ -875,12 +879,7 @@ static void set_operstate(struct net_device *dev, unsigned char transition)
break;
}
- if (dev->operstate != operstate) {
- write_lock(&dev_base_lock);
- dev->operstate = operstate;
- write_unlock(&dev_base_lock);
- netdev_state_change(dev);
- }
+ netdev_set_operstate(dev, operstate);
}
static unsigned int rtnl_dev_get_flags(const struct net_device *dev)
@@ -1456,17 +1455,18 @@ static noinline_for_stack int rtnl_fill_vf(struct sk_buff *skb,
return 0;
}
-static int rtnl_fill_link_ifmap(struct sk_buff *skb, struct net_device *dev)
+static int rtnl_fill_link_ifmap(struct sk_buff *skb,
+ const struct net_device *dev)
{
struct rtnl_link_ifmap map;
memset(&map, 0, sizeof(map));
- map.mem_start = dev->mem_start;
- map.mem_end = dev->mem_end;
- map.base_addr = dev->base_addr;
- map.irq = dev->irq;
- map.dma = dev->dma;
- map.port = dev->if_port;
+ map.mem_start = READ_ONCE(dev->mem_start);
+ map.mem_end = READ_ONCE(dev->mem_end);
+ map.base_addr = READ_ONCE(dev->base_addr);
+ map.irq = READ_ONCE(dev->irq);
+ map.dma = READ_ONCE(dev->dma);
+ map.port = READ_ONCE(dev->if_port);
if (nla_put_64bit(skb, IFLA_MAP, sizeof(map), &map, IFLA_PAD))
return -EMSGSIZE;
@@ -1612,10 +1612,10 @@ static int put_master_ifindex(struct sk_buff *skb, struct net_device *dev)
static int nla_put_iflink(struct sk_buff *skb, const struct net_device *dev,
bool force)
{
- int ifindex = dev_get_iflink(dev);
+ int iflink = dev_get_iflink(dev);
- if (force || dev->ifindex != ifindex)
- return nla_put_u32(skb, IFLA_LINK, ifindex);
+ if (force || READ_ONCE(dev->ifindex) != iflink)
+ return nla_put_u32(skb, IFLA_LINK, iflink);
return 0;
}
@@ -1699,7 +1699,7 @@ static int rtnl_fill_alt_ifnames(struct sk_buff *skb,
struct netdev_name_node *name_node;
int count = 0;
- list_for_each_entry(name_node, &dev->name_node->list, list) {
+ list_for_each_entry_rcu(name_node, &dev->name_node->list, list) {
if (nla_put_string(skb, IFLA_ALT_IFNAME, name_node->name))
return -EMSGSIZE;
count++;
@@ -1707,6 +1707,7 @@ static int rtnl_fill_alt_ifnames(struct sk_buff *skb,
return count;
}
+/* RCU protected. */
static int rtnl_fill_prop_list(struct sk_buff *skb,
const struct net_device *dev)
{
@@ -1876,9 +1877,6 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb,
goto nla_put_failure;
}
- if (rtnl_fill_link_ifmap(skb, dev))
- goto nla_put_failure;
-
if (dev->addr_len) {
if (nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr) ||
nla_put(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast))
@@ -1928,10 +1926,11 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb,
rcu_read_lock();
if (rtnl_fill_link_af(skb, dev, ext_filter_mask))
goto nla_put_failure_rcu;
- rcu_read_unlock();
-
+ if (rtnl_fill_link_ifmap(skb, dev))
+ goto nla_put_failure_rcu;
if (rtnl_fill_prop_list(skb, dev))
- goto nla_put_failure;
+ goto nla_put_failure_rcu;
+ rcu_read_unlock();
if (dev->dev.parent &&
nla_put_string(skb, IFLA_PARENT_DEV_NAME,
@@ -2200,25 +2199,22 @@ static int rtnl_valid_dump_ifinfo_req(const struct nlmsghdr *nlh,
static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
{
+ const struct rtnl_link_ops *kind_ops = NULL;
struct netlink_ext_ack *extack = cb->extack;
const struct nlmsghdr *nlh = cb->nlh;
struct net *net = sock_net(skb->sk);
- struct net *tgt_net = net;
- int h, s_h;
- int idx = 0, s_idx;
- struct net_device *dev;
- struct hlist_head *head;
+ unsigned int flags = NLM_F_MULTI;
struct nlattr *tb[IFLA_MAX+1];
+ struct {
+ unsigned long ifindex;
+ } *ctx = (void *)cb->ctx;
+ struct net *tgt_net = net;
u32 ext_filter_mask = 0;
- const struct rtnl_link_ops *kind_ops = NULL;
- unsigned int flags = NLM_F_MULTI;
+ struct net_device *dev;
int master_idx = 0;
int netnsid = -1;
int err, i;
- s_h = cb->args[0];
- s_idx = cb->args[1];
-
err = rtnl_valid_dump_ifinfo_req(nlh, cb->strict_check, tb, extack);
if (err < 0) {
if (cb->strict_check)
@@ -2262,36 +2258,18 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
flags |= NLM_F_DUMP_FILTERED;
walk_entries:
- for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
- idx = 0;
- head = &tgt_net->dev_index_head[h];
- hlist_for_each_entry(dev, head, index_hlist) {
- if (link_dump_filtered(dev, master_idx, kind_ops))
- goto cont;
- if (idx < s_idx)
- goto cont;
- err = rtnl_fill_ifinfo(skb, dev, net,
- RTM_NEWLINK,
- NETLINK_CB(cb->skb).portid,
- nlh->nlmsg_seq, 0, flags,
- ext_filter_mask, 0, NULL, 0,
- netnsid, GFP_KERNEL);
-
- if (err < 0) {
- if (likely(skb->len))
- goto out;
-
- goto out_err;
- }
-cont:
- idx++;
- }
+ err = 0;
+ for_each_netdev_dump(tgt_net, dev, ctx->ifindex) {
+ if (link_dump_filtered(dev, master_idx, kind_ops))
+ continue;
+ err = rtnl_fill_ifinfo(skb, dev, net, RTM_NEWLINK,
+ NETLINK_CB(cb->skb).portid,
+ nlh->nlmsg_seq, 0, flags,
+ ext_filter_mask, 0, NULL, 0,
+ netnsid, GFP_KERNEL);
+ if (err < 0)
+ break;
}
-out:
- err = skb->len;
-out_err:
- cb->args[1] = idx;
- cb->args[0] = h;
cb->seq = tgt_net->dev_base_seq;
nl_dump_check_consistent(cb, nlmsg_hdr(skb));
if (netnsid >= 0)
@@ -2983,11 +2961,9 @@ static int do_setlink(const struct sk_buff *skb,
if (tb[IFLA_LINKMODE]) {
unsigned char value = nla_get_u8(tb[IFLA_LINKMODE]);
- write_lock(&dev_base_lock);
if (dev->link_mode ^ value)
status |= DO_SETLINK_NOTIFY;
- dev->link_mode = value;
- write_unlock(&dev_base_lock);
+ WRITE_ONCE(dev->link_mode, value);
}
if (tb[IFLA_VFINFO_LIST]) {
@@ -6552,6 +6528,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
}
owner = link->owner;
dumpit = link->dumpit;
+ flags = link->flags;
if (type == RTM_GETLINK - RTM_BASE)
min_dump_alloc = rtnl_calcit(skb, nlh);
@@ -6569,6 +6546,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
.dump = dumpit,
.min_dump_alloc = min_dump_alloc,
.module = owner,
+ .flags = flags,
};
err = netlink_dump_start(rtnl, skb, nlh, &c);
/* netlink_dump_start() will keep a reference on
diff --git a/net/core/scm.c b/net/core/scm.c
index d0e0852a24d5..9cd4b0a01cd6 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -36,6 +36,7 @@
#include <net/compat.h>
#include <net/scm.h>
#include <net/cls_cgroup.h>
+#include <net/af_unix.h>
/*
@@ -85,6 +86,7 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp)
return -ENOMEM;
*fplp = fpl;
fpl->count = 0;
+ fpl->count_unix = 0;
fpl->max = SCM_MAX_FD;
fpl->user = NULL;
}
@@ -109,6 +111,9 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp)
fput(file);
return -EINVAL;
}
+ if (unix_get_socket(file))
+ fpl->count_unix++;
+
*fpp++ = file;
fpl->count++;
}
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index edbbef563d4d..b99127712e67 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -69,6 +69,7 @@
#include <net/sock.h>
#include <net/checksum.h>
#include <net/gso.h>
+#include <net/hotdata.h>
#include <net/ip6_checksum.h>
#include <net/xfrm.h>
#include <net/mpls.h>
@@ -88,15 +89,10 @@
#include "dev.h"
#include "sock_destructor.h"
-struct kmem_cache *skbuff_cache __ro_after_init;
-static struct kmem_cache *skbuff_fclone_cache __ro_after_init;
#ifdef CONFIG_SKB_EXTENSIONS
static struct kmem_cache *skbuff_ext_cache __ro_after_init;
#endif
-
-static struct kmem_cache *skb_small_head_cache __ro_after_init;
-
#define SKB_SMALL_HEAD_SIZE SKB_HEAD_ALIGN(MAX_TCP_HEADER)
/* We want SKB_SMALL_HEAD_CACHE_SIZE to not be a power of two.
@@ -115,6 +111,24 @@ static struct kmem_cache *skb_small_head_cache __ro_after_init;
int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS;
EXPORT_SYMBOL(sysctl_max_skb_frags);
+/* kcm_write_msgs() relies on casting paged frags to bio_vec to use
+ * iov_iter_bvec(). These static asserts ensure the cast is valid as long
+ * as the netmem is a page.
+ */
+static_assert(offsetof(struct bio_vec, bv_page) ==
+ offsetof(skb_frag_t, netmem));
+static_assert(sizeof_field(struct bio_vec, bv_page) ==
+ sizeof_field(skb_frag_t, netmem));
+
+static_assert(offsetof(struct bio_vec, bv_len) == offsetof(skb_frag_t, len));
+static_assert(sizeof_field(struct bio_vec, bv_len) ==
+ sizeof_field(skb_frag_t, len));
+
+static_assert(offsetof(struct bio_vec, bv_offset) ==
+ offsetof(skb_frag_t, offset));
+static_assert(sizeof_field(struct bio_vec, bv_offset) ==
+ sizeof_field(skb_frag_t, offset));
+
#undef FN
#define FN(reason) [SKB_DROP_REASON_##reason] = #reason,
static const char * const drop_reasons[] = {
@@ -297,7 +311,8 @@ void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
fragsz = SKB_DATA_ALIGN(fragsz);
- return page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC, align_mask);
+ return __page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC,
+ align_mask);
}
EXPORT_SYMBOL(__napi_alloc_frag_align);
@@ -309,13 +324,15 @@ void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
if (in_hardirq() || irqs_disabled()) {
struct page_frag_cache *nc = this_cpu_ptr(&netdev_alloc_cache);
- data = page_frag_alloc_align(nc, fragsz, GFP_ATOMIC, align_mask);
+ data = __page_frag_alloc_align(nc, fragsz, GFP_ATOMIC,
+ align_mask);
} else {
struct napi_alloc_cache *nc;
local_bh_disable();
nc = this_cpu_ptr(&napi_alloc_cache);
- data = page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC, align_mask);
+ data = __page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC,
+ align_mask);
local_bh_enable();
}
return data;
@@ -328,7 +345,7 @@ static struct sk_buff *napi_skb_cache_get(void)
struct sk_buff *skb;
if (unlikely(!nc->skb_count)) {
- nc->skb_count = kmem_cache_alloc_bulk(skbuff_cache,
+ nc->skb_count = kmem_cache_alloc_bulk(net_hotdata.skbuff_cache,
GFP_ATOMIC,
NAPI_SKB_CACHE_BULK,
nc->skb_cache);
@@ -337,7 +354,7 @@ static struct sk_buff *napi_skb_cache_get(void)
}
skb = nc->skb_cache[--nc->skb_count];
- kasan_mempool_unpoison_object(skb, kmem_cache_size(skbuff_cache));
+ kasan_mempool_unpoison_object(skb, kmem_cache_size(net_hotdata.skbuff_cache));
return skb;
}
@@ -395,7 +412,7 @@ struct sk_buff *slab_build_skb(void *data)
struct sk_buff *skb;
unsigned int size;
- skb = kmem_cache_alloc(skbuff_cache, GFP_ATOMIC);
+ skb = kmem_cache_alloc(net_hotdata.skbuff_cache, GFP_ATOMIC);
if (unlikely(!skb))
return NULL;
@@ -446,7 +463,7 @@ struct sk_buff *__build_skb(void *data, unsigned int frag_size)
{
struct sk_buff *skb;
- skb = kmem_cache_alloc(skbuff_cache, GFP_ATOMIC);
+ skb = kmem_cache_alloc(net_hotdata.skbuff_cache, GFP_ATOMIC);
if (unlikely(!skb))
return NULL;
@@ -557,7 +574,7 @@ static void *kmalloc_reserve(unsigned int *size, gfp_t flags, int node,
obj_size = SKB_HEAD_ALIGN(*size);
if (obj_size <= SKB_SMALL_HEAD_CACHE_SIZE &&
!(flags & KMALLOC_NOT_NORMAL_BITS)) {
- obj = kmem_cache_alloc_node(skb_small_head_cache,
+ obj = kmem_cache_alloc_node(net_hotdata.skb_small_head_cache,
flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
node);
*size = SKB_SMALL_HEAD_CACHE_SIZE;
@@ -565,7 +582,7 @@ static void *kmalloc_reserve(unsigned int *size, gfp_t flags, int node,
goto out;
/* Try again but now we are using pfmemalloc reserves */
ret_pfmemalloc = true;
- obj = kmem_cache_alloc_node(skb_small_head_cache, flags, node);
+ obj = kmem_cache_alloc_node(net_hotdata.skb_small_head_cache, flags, node);
goto out;
}
@@ -628,7 +645,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
u8 *data;
cache = (flags & SKB_ALLOC_FCLONE)
- ? skbuff_fclone_cache : skbuff_cache;
+ ? net_hotdata.skbuff_fclone_cache : net_hotdata.skbuff_cache;
if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
gfp_mask |= __GFP_MEMALLOC;
@@ -845,17 +862,17 @@ skb_fail:
}
EXPORT_SYMBOL(__napi_alloc_skb);
-void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
- int size, unsigned int truesize)
+void skb_add_rx_frag_netmem(struct sk_buff *skb, int i, netmem_ref netmem,
+ int off, int size, unsigned int truesize)
{
DEBUG_NET_WARN_ON_ONCE(size > truesize);
- skb_fill_page_desc(skb, i, page, off, size);
+ skb_fill_netmem_desc(skb, i, netmem, off, size);
skb->len += size;
skb->data_len += size;
skb->truesize += truesize;
}
-EXPORT_SYMBOL(skb_add_rx_frag);
+EXPORT_SYMBOL(skb_add_rx_frag_netmem);
void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
unsigned int truesize)
@@ -895,6 +912,98 @@ static bool is_pp_page(struct page *page)
return (page->pp_magic & ~0x3UL) == PP_SIGNATURE;
}
+int skb_pp_cow_data(struct page_pool *pool, struct sk_buff **pskb,
+ unsigned int headroom)
+{
+#if IS_ENABLED(CONFIG_PAGE_POOL)
+ u32 size, truesize, len, max_head_size, off;
+ struct sk_buff *skb = *pskb, *nskb;
+ int err, i, head_off;
+ void *data;
+
+ /* XDP does not support fraglist so we need to linearize
+ * the skb.
+ */
+ if (skb_has_frag_list(skb))
+ return -EOPNOTSUPP;
+
+ max_head_size = SKB_WITH_OVERHEAD(PAGE_SIZE - headroom);
+ if (skb->len > max_head_size + MAX_SKB_FRAGS * PAGE_SIZE)
+ return -ENOMEM;
+
+ size = min_t(u32, skb->len, max_head_size);
+ truesize = SKB_HEAD_ALIGN(size) + headroom;
+ data = page_pool_dev_alloc_va(pool, &truesize);
+ if (!data)
+ return -ENOMEM;
+
+ nskb = napi_build_skb(data, truesize);
+ if (!nskb) {
+ page_pool_free_va(pool, data, true);
+ return -ENOMEM;
+ }
+
+ skb_reserve(nskb, headroom);
+ skb_copy_header(nskb, skb);
+ skb_mark_for_recycle(nskb);
+
+ err = skb_copy_bits(skb, 0, nskb->data, size);
+ if (err) {
+ consume_skb(nskb);
+ return err;
+ }
+ skb_put(nskb, size);
+
+ head_off = skb_headroom(nskb) - skb_headroom(skb);
+ skb_headers_offset_update(nskb, head_off);
+
+ off = size;
+ len = skb->len - off;
+ for (i = 0; i < MAX_SKB_FRAGS && off < skb->len; i++) {
+ struct page *page;
+ u32 page_off;
+
+ size = min_t(u32, len, PAGE_SIZE);
+ truesize = size;
+
+ page = page_pool_dev_alloc(pool, &page_off, &truesize);
+ if (!page) {
+ consume_skb(nskb);
+ return -ENOMEM;
+ }
+
+ skb_add_rx_frag(nskb, i, page, page_off, size, truesize);
+ err = skb_copy_bits(skb, off, page_address(page) + page_off,
+ size);
+ if (err) {
+ consume_skb(nskb);
+ return err;
+ }
+
+ len -= size;
+ off += size;
+ }
+
+ consume_skb(skb);
+ *pskb = nskb;
+
+ return 0;
+#else
+ return -EOPNOTSUPP;
+#endif
+}
+EXPORT_SYMBOL(skb_pp_cow_data);
+
+int skb_cow_data_for_xdp(struct page_pool *pool, struct sk_buff **pskb,
+ struct bpf_prog *prog)
+{
+ if (!prog->aux->xdp_has_frags)
+ return -EINVAL;
+
+ return skb_pp_cow_data(pool, pskb, XDP_PACKET_HEADROOM);
+}
+EXPORT_SYMBOL(skb_cow_data_for_xdp);
+
#if IS_ENABLED(CONFIG_PAGE_POOL)
bool napi_pp_put_page(struct page *page, bool napi_safe)
{
@@ -923,9 +1032,10 @@ bool napi_pp_put_page(struct page *page, bool napi_safe)
*/
if (napi_safe || in_softirq()) {
const struct napi_struct *napi = READ_ONCE(pp->p.napi);
+ unsigned int cpuid = smp_processor_id();
- allow_direct = napi &&
- READ_ONCE(napi->list_owner) == smp_processor_id();
+ allow_direct = napi && READ_ONCE(napi->list_owner) == cpuid;
+ allow_direct |= READ_ONCE(pp->cpuid) == cpuid;
}
/* Driver set this to memory recycling info. Reset it on recycle.
@@ -981,7 +1091,7 @@ static int skb_pp_frag_ref(struct sk_buff *skb)
static void skb_kfree_head(void *head, unsigned int end_offset)
{
if (end_offset == SKB_SMALL_HEAD_HEADROOM)
- kmem_cache_free(skb_small_head_cache, head);
+ kmem_cache_free(net_hotdata.skb_small_head_cache, head);
else
kfree(head);
}
@@ -1005,9 +1115,7 @@ static void skb_release_data(struct sk_buff *skb, enum skb_drop_reason reason,
struct skb_shared_info *shinfo = skb_shinfo(skb);
int i;
- if (skb->cloned &&
- atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
- &shinfo->dataref))
+ if (!skb_data_unref(skb, shinfo))
goto exit;
if (skb_zcopy(skb)) {
@@ -1048,7 +1156,7 @@ static void kfree_skbmem(struct sk_buff *skb)
switch (skb->fclone) {
case SKB_FCLONE_UNAVAILABLE:
- kmem_cache_free(skbuff_cache, skb);
+ kmem_cache_free(net_hotdata.skbuff_cache, skb);
return;
case SKB_FCLONE_ORIG:
@@ -1069,7 +1177,7 @@ static void kfree_skbmem(struct sk_buff *skb)
if (!refcount_dec_and_test(&fclones->fclone_ref))
return;
fastpath:
- kmem_cache_free(skbuff_fclone_cache, fclones);
+ kmem_cache_free(net_hotdata.skbuff_fclone_cache, fclones);
}
void skb_release_head_state(struct sk_buff *skb)
@@ -1166,7 +1274,7 @@ static void kfree_skb_add_bulk(struct sk_buff *skb,
sa->skb_array[sa->skb_count++] = skb;
if (unlikely(sa->skb_count == KFREE_SKB_BULK_SIZE)) {
- kmem_cache_free_bulk(skbuff_cache, KFREE_SKB_BULK_SIZE,
+ kmem_cache_free_bulk(net_hotdata.skbuff_cache, KFREE_SKB_BULK_SIZE,
sa->skb_array);
sa->skb_count = 0;
}
@@ -1191,7 +1299,7 @@ kfree_skb_list_reason(struct sk_buff *segs, enum skb_drop_reason reason)
}
if (sa.skb_count)
- kmem_cache_free_bulk(skbuff_cache, sa.skb_count, sa.skb_array);
+ kmem_cache_free_bulk(net_hotdata.skbuff_cache, sa.skb_count, sa.skb_array);
}
EXPORT_SYMBOL(kfree_skb_list_reason);
@@ -1353,9 +1461,9 @@ static void napi_skb_cache_put(struct sk_buff *skb)
if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) {
for (i = NAPI_SKB_CACHE_HALF; i < NAPI_SKB_CACHE_SIZE; i++)
kasan_mempool_unpoison_object(nc->skb_cache[i],
- kmem_cache_size(skbuff_cache));
+ kmem_cache_size(net_hotdata.skbuff_cache));
- kmem_cache_free_bulk(skbuff_cache, NAPI_SKB_CACHE_HALF,
+ kmem_cache_free_bulk(net_hotdata.skbuff_cache, NAPI_SKB_CACHE_HALF,
nc->skb_cache + NAPI_SKB_CACHE_HALF);
nc->skb_count = NAPI_SKB_CACHE_HALF;
}
@@ -1906,10 +2014,11 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
/* skb frags point to kernel buffers */
for (i = 0; i < new_frags - 1; i++) {
- __skb_fill_page_desc(skb, i, head, 0, psize);
+ __skb_fill_netmem_desc(skb, i, page_to_netmem(head), 0, psize);
head = (struct page *)page_private(head);
}
- __skb_fill_page_desc(skb, new_frags - 1, head, 0, d_off);
+ __skb_fill_netmem_desc(skb, new_frags - 1, page_to_netmem(head), 0,
+ d_off);
skb_shinfo(skb)->nr_frags = new_frags;
release:
@@ -1951,7 +2060,7 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
if (skb_pfmemalloc(skb))
gfp_mask |= __GFP_MEMALLOC;
- n = kmem_cache_alloc(skbuff_cache, gfp_mask);
+ n = kmem_cache_alloc(net_hotdata.skbuff_cache, gfp_mask);
if (!n)
return NULL;
@@ -3647,7 +3756,8 @@ skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen)
if (plen) {
page = virt_to_head_page(from->head);
offset = from->data - (unsigned char *)page_address(page);
- __skb_fill_page_desc(to, 0, page, offset, plen);
+ __skb_fill_netmem_desc(to, 0, page_to_netmem(page),
+ offset, plen);
get_page(page);
j = 1;
len -= plen;
@@ -4889,7 +4999,7 @@ static void skb_extensions_init(void) {}
void __init skb_init(void)
{
- skbuff_cache = kmem_cache_create_usercopy("skbuff_head_cache",
+ net_hotdata.skbuff_cache = kmem_cache_create_usercopy("skbuff_head_cache",
sizeof(struct sk_buff),
0,
SLAB_HWCACHE_ALIGN|SLAB_PANIC|
@@ -4897,7 +5007,7 @@ void __init skb_init(void)
offsetof(struct sk_buff, cb),
sizeof_field(struct sk_buff, cb),
NULL);
- skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
+ net_hotdata.skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
sizeof(struct sk_buff_fclones),
0,
SLAB_HWCACHE_ALIGN|SLAB_PANIC,
@@ -4906,7 +5016,7 @@ void __init skb_init(void)
* struct skb_shared_info is located at the end of skb->head,
* and should not be copied to/from user.
*/
- skb_small_head_cache = kmem_cache_create_usercopy("skbuff_small_head",
+ net_hotdata.skb_small_head_cache = kmem_cache_create_usercopy("skbuff_small_head",
SKB_SMALL_HEAD_CACHE_SIZE,
0,
SLAB_HWCACHE_ALIGN | SLAB_PANIC,
@@ -5779,7 +5889,7 @@ void kfree_skb_partial(struct sk_buff *skb, bool head_stolen)
{
if (head_stolen) {
skb_release_head_state(skb);
- kmem_cache_free(skbuff_cache, skb);
+ kmem_cache_free(net_hotdata.skbuff_cache, skb);
} else {
__kfree_skb(skb);
}
@@ -6737,6 +6847,14 @@ static struct skb_ext *skb_ext_maybe_cow(struct skb_ext *old,
xfrm_state_hold(sp->xvec[i]);
}
#endif
+#ifdef CONFIG_MCTP_FLOWS
+ if (old_active & (1 << SKB_EXT_MCTP)) {
+ struct mctp_flow *flow = skb_ext_get_ptr(old, SKB_EXT_MCTP);
+
+ if (flow->key)
+ refcount_inc(&flow->key->refs);
+ }
+#endif
__skb_ext_put(old);
return new;
}
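
The skbuff.c hunks above are one instance of a pattern that recurs through this series: formerly file-scope hot globals (skbuff_cache and friends) move into the shared net_hotdata structure so the fast path reads a few adjacent cachelines instead of scattered per-file variables. A minimal sketch of the idea follows; the field list is an assumption inferred from these hunks, not the full include/net/hotdata.h layout:

    /* Sketch only: collect mostly-read networking globals in one
     * cacheline-aligned struct instead of one global per file.
     */
    struct net_hotdata {
            struct kmem_cache *skbuff_cache;
            struct kmem_cache *skbuff_fclone_cache;
            struct kmem_cache *skb_small_head_cache;
            int netdev_budget;
            unsigned int netdev_budget_usecs;
    } ____cacheline_aligned;

    extern struct net_hotdata net_hotdata;

    /* fast-path callers then dereference a single hot structure */
    static struct sk_buff *demo_alloc_head(gfp_t gfp)
    {
            return kmem_cache_alloc(net_hotdata.skbuff_cache, gfp);
    }
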
diff --git a/net/core/sock.c b/net/core/sock.c
index 5e78798456fd..43bf3818c19e 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -283,6 +283,7 @@ __u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
EXPORT_SYMBOL(sysctl_rmem_max);
__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;
+int sysctl_mem_pcpu_rsv __read_mostly = SK_MEMORY_PCPU_RESERVE;
int sysctl_tstamp_allow_data __read_mostly = 1;
@@ -2052,8 +2053,9 @@ static void sock_copy(struct sock *nsk, const struct sock *osk)
memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin));
- memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end,
- prot->obj_size - offsetof(struct sock, sk_dontcopy_end));
+ unsafe_memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end,
+ prot->obj_size - offsetof(struct sock, sk_dontcopy_end),
+ /* alloc is larger than struct, see sk_prot_alloc() */);
#ifdef CONFIG_SECURITY_NETWORK
nsk->sk_security = sptr;
@@ -2582,8 +2584,18 @@ EXPORT_SYMBOL(sock_efree);
#ifdef CONFIG_INET
void sock_pfree(struct sk_buff *skb)
{
- if (sk_is_refcounted(skb->sk))
- sock_gen_put(skb->sk);
+ struct sock *sk = skb->sk;
+
+ if (!sk_is_refcounted(sk))
+ return;
+
+ if (sk->sk_state == TCP_NEW_SYN_RECV && inet_reqsk(sk)->syncookie) {
+ inet_reqsk(sk)->rsk_listener = NULL;
+ reqsk_free(inet_reqsk(sk));
+ return;
+ }
+
+ sock_gen_put(sk);
}
EXPORT_SYMBOL(sock_pfree);
#endif /* CONFIG_INET */
@@ -4223,3 +4235,65 @@ int sk_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
return sock_ioctl_out(sk, cmd, arg);
}
EXPORT_SYMBOL(sk_ioctl);
+
+static int __init sock_struct_check(void)
+{
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rx, sk_drops);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rx, sk_peek_off);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rx, sk_error_queue);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rx, sk_receive_queue);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rx, sk_backlog);
+
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_rx_dst);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_rx_dst_ifindex);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_rx_dst_cookie);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_rcvbuf);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_filter);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_wq);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_data_ready);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_rcvtimeo);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_rcvlowat);
+
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rxtx, sk_err);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rxtx, sk_socket);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rxtx, sk_memcg);
+
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rxtx, sk_lock);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rxtx, sk_reserved_mem);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rxtx, sk_forward_alloc);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rxtx, sk_tsflags);
+
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_omem_alloc);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_omem_alloc);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_sndbuf);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_wmem_queued);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_wmem_alloc);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_tsq_flags);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_send_head);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_write_queue);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_write_pending);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_dst_pending_confirm);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_pacing_status);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_frag);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_timer);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_pacing_rate);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_zckey);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_tskey);
+
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_max_pacing_rate);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_sndtimeo);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_priority);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_mark);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_dst_cache);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_route_caps);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_gso_type);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_gso_max_size);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_allocation);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_txhash);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_gso_max_segs);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_pacing_shift);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_use_task_frag);
+ return 0;
+}
+
+core_initcall(sock_struct_check);
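
The new sock_struct_check() pairs with __cacheline_group_begin()/__cacheline_group_end() markers placed inside struct sock: each assert verifies at build time that a field still sits within its named group, so later reordering cannot silently regress the rx/tx cacheline layout. A hedged sketch of that pairing, with demo names (the real macros live in include/linux/cache.h):

    struct demo_sock {
            __cacheline_group_begin(demo_write_tx);
            int snd_buf;
            int wmem_queued;
            __cacheline_group_end(demo_write_tx);
    };

    /* compilation fails if a field moves out of its declared group */
    static void demo_check(void)
    {
            CACHELINE_ASSERT_GROUP_MEMBER(struct demo_sock, demo_write_tx, snd_buf);
            CACHELINE_ASSERT_GROUP_MEMBER(struct demo_sock, demo_write_tx, wmem_queued);
    }
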
diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
index b1e29e18d1d6..654122838025 100644
--- a/net/core/sock_diag.c
+++ b/net/core/sock_diag.c
@@ -16,9 +16,10 @@
#include <linux/inet_diag.h>
#include <linux/sock_diag.h>
-static const struct sock_diag_handler *sock_diag_handlers[AF_MAX];
-static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh);
-static DEFINE_MUTEX(sock_diag_table_mutex);
+static const struct sock_diag_handler __rcu *sock_diag_handlers[AF_MAX];
+
+static struct sock_diag_inet_compat __rcu *inet_rcv_compat;
+
static struct workqueue_struct *broadcast_wq;
DEFINE_COOKIE(sock_cookie);
@@ -122,6 +123,24 @@ static size_t sock_diag_nlmsg_size(void)
+ nla_total_size_64bit(sizeof(struct tcp_info))); /* INET_DIAG_INFO */
}
+static const struct sock_diag_handler *sock_diag_lock_handler(int family)
+{
+ const struct sock_diag_handler *handler;
+
+ rcu_read_lock();
+ handler = rcu_dereference(sock_diag_handlers[family]);
+ if (handler && !try_module_get(handler->owner))
+ handler = NULL;
+ rcu_read_unlock();
+
+ return handler;
+}
+
+static void sock_diag_unlock_handler(const struct sock_diag_handler *handler)
+{
+ module_put(handler->owner);
+}
+
static void sock_diag_broadcast_destroy_work(struct work_struct *work)
{
struct broadcast_sk *bsk =
@@ -138,12 +157,12 @@ static void sock_diag_broadcast_destroy_work(struct work_struct *work)
if (!skb)
goto out;
- mutex_lock(&sock_diag_table_mutex);
- hndl = sock_diag_handlers[sk->sk_family];
- if (hndl && hndl->get_info)
- err = hndl->get_info(skb, sk);
- mutex_unlock(&sock_diag_table_mutex);
-
+ hndl = sock_diag_lock_handler(sk->sk_family);
+ if (hndl) {
+ if (hndl->get_info)
+ err = hndl->get_info(skb, sk);
+ sock_diag_unlock_handler(hndl);
+ }
if (!err)
nlmsg_multicast(sock_net(sk)->diag_nlsk, skb, 0, group,
GFP_KERNEL);
@@ -166,51 +185,45 @@ void sock_diag_broadcast_destroy(struct sock *sk)
queue_work(broadcast_wq, &bsk->work);
}
-void sock_diag_register_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh))
+void sock_diag_register_inet_compat(const struct sock_diag_inet_compat *ptr)
{
- mutex_lock(&sock_diag_table_mutex);
- inet_rcv_compat = fn;
- mutex_unlock(&sock_diag_table_mutex);
+ xchg((__force const struct sock_diag_inet_compat **)&inet_rcv_compat,
+ ptr);
}
EXPORT_SYMBOL_GPL(sock_diag_register_inet_compat);
-void sock_diag_unregister_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh))
+void sock_diag_unregister_inet_compat(const struct sock_diag_inet_compat *ptr)
{
- mutex_lock(&sock_diag_table_mutex);
- inet_rcv_compat = NULL;
- mutex_unlock(&sock_diag_table_mutex);
+ const struct sock_diag_inet_compat *old;
+
+ old = xchg((__force const struct sock_diag_inet_compat **)&inet_rcv_compat,
+ NULL);
+ WARN_ON_ONCE(old != ptr);
}
EXPORT_SYMBOL_GPL(sock_diag_unregister_inet_compat);
int sock_diag_register(const struct sock_diag_handler *hndl)
{
- int err = 0;
+ int family = hndl->family;
- if (hndl->family >= AF_MAX)
+ if (family >= AF_MAX)
return -EINVAL;
- mutex_lock(&sock_diag_table_mutex);
- if (sock_diag_handlers[hndl->family])
- err = -EBUSY;
- else
- sock_diag_handlers[hndl->family] = hndl;
- mutex_unlock(&sock_diag_table_mutex);
-
- return err;
+ return !cmpxchg((const struct sock_diag_handler **)
+ &sock_diag_handlers[family],
+ NULL, hndl) ? 0 : -EBUSY;
}
EXPORT_SYMBOL_GPL(sock_diag_register);
-void sock_diag_unregister(const struct sock_diag_handler *hnld)
+void sock_diag_unregister(const struct sock_diag_handler *hndl)
{
- int family = hnld->family;
+ int family = hndl->family;
if (family >= AF_MAX)
return;
- mutex_lock(&sock_diag_table_mutex);
- BUG_ON(sock_diag_handlers[family] != hnld);
- sock_diag_handlers[family] = NULL;
- mutex_unlock(&sock_diag_table_mutex);
+ xchg((const struct sock_diag_handler **)&sock_diag_handlers[family],
+ NULL);
}
EXPORT_SYMBOL_GPL(sock_diag_unregister);
@@ -227,20 +240,20 @@ static int __sock_diag_cmd(struct sk_buff *skb, struct nlmsghdr *nlh)
return -EINVAL;
req->sdiag_family = array_index_nospec(req->sdiag_family, AF_MAX);
- if (sock_diag_handlers[req->sdiag_family] == NULL)
+ if (!rcu_access_pointer(sock_diag_handlers[req->sdiag_family]))
sock_load_diag_module(req->sdiag_family, 0);
- mutex_lock(&sock_diag_table_mutex);
- hndl = sock_diag_handlers[req->sdiag_family];
+ hndl = sock_diag_lock_handler(req->sdiag_family);
if (hndl == NULL)
- err = -ENOENT;
- else if (nlh->nlmsg_type == SOCK_DIAG_BY_FAMILY)
+ return -ENOENT;
+
+ if (nlh->nlmsg_type == SOCK_DIAG_BY_FAMILY)
err = hndl->dump(skb, nlh);
else if (nlh->nlmsg_type == SOCK_DESTROY && hndl->destroy)
err = hndl->destroy(skb, nlh);
else
err = -EOPNOTSUPP;
- mutex_unlock(&sock_diag_table_mutex);
+ sock_diag_unlock_handler(hndl);
return err;
}
@@ -248,20 +261,27 @@ static int __sock_diag_cmd(struct sk_buff *skb, struct nlmsghdr *nlh)
static int sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
+ const struct sock_diag_inet_compat *ptr;
int ret;
switch (nlh->nlmsg_type) {
case TCPDIAG_GETSOCK:
case DCCPDIAG_GETSOCK:
- if (inet_rcv_compat == NULL)
+
+ if (!rcu_access_pointer(inet_rcv_compat))
sock_load_diag_module(AF_INET, 0);
- mutex_lock(&sock_diag_table_mutex);
- if (inet_rcv_compat != NULL)
- ret = inet_rcv_compat(skb, nlh);
- else
- ret = -EOPNOTSUPP;
- mutex_unlock(&sock_diag_table_mutex);
+ rcu_read_lock();
+ ptr = rcu_dereference(inet_rcv_compat);
+ if (ptr && !try_module_get(ptr->owner))
+ ptr = NULL;
+ rcu_read_unlock();
+
+ ret = -EOPNOTSUPP;
+ if (ptr) {
+ ret = ptr->fn(skb, nlh);
+ module_put(ptr->owner);
+ }
return ret;
case SOCK_DIAG_BY_FAMILY:
@@ -272,13 +292,9 @@ static int sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
}
}
-static DEFINE_MUTEX(sock_diag_mutex);
-
static void sock_diag_rcv(struct sk_buff *skb)
{
- mutex_lock(&sock_diag_mutex);
netlink_rcv_skb(skb, &sock_diag_rcv_msg);
- mutex_unlock(&sock_diag_mutex);
}
static int sock_diag_bind(struct net *net, int group)
@@ -286,12 +302,12 @@ static int sock_diag_bind(struct net *net, int group)
switch (group) {
case SKNLGRP_INET_TCP_DESTROY:
case SKNLGRP_INET_UDP_DESTROY:
- if (!sock_diag_handlers[AF_INET])
+ if (!rcu_access_pointer(sock_diag_handlers[AF_INET]))
sock_load_diag_module(AF_INET, 0);
break;
case SKNLGRP_INET6_TCP_DESTROY:
case SKNLGRP_INET6_UDP_DESTROY:
- if (!sock_diag_handlers[AF_INET6])
+ if (!rcu_access_pointer(sock_diag_handlers[AF_INET6]))
sock_load_diag_module(AF_INET6, 0);
break;
}
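
The sock_diag conversion drops both global mutexes: handlers are published with cmpxchg()/xchg() and looked up under RCU, with try_module_get() taken inside the read-side section. As I read the hunks, that is sufficient because a handler's owner clears its pointer on unregister and try_module_get() fails once the module has started going away, so a racing reader either sees NULL or pins the module for the duration of the call. Condensed shape of the lookup, as a sketch with demo names:

    static const struct sock_diag_handler __rcu *demo_handlers[AF_MAX];

    static const struct sock_diag_handler *demo_get(int family)
    {
            const struct sock_diag_handler *h;

            rcu_read_lock();
            h = rcu_dereference(demo_handlers[family]);
            if (h && !try_module_get(h->owner))
                    h = NULL;       /* owner is unloading: treat as absent */
            rcu_read_unlock();

            return h;               /* caller releases with module_put() */
    }
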
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 0f0cb1465e08..6973dda3abda 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -23,6 +23,8 @@
#include <net/net_ratelimit.h>
#include <net/busy_poll.h>
#include <net/pkt_sched.h>
+#include <net/hotdata.h>
+#include <net/rps.h>
#include "dev.h"
@@ -30,6 +32,7 @@ static int int_3600 = 3600;
static int min_sndbuf = SOCK_MIN_SNDBUF;
static int min_rcvbuf = SOCK_MIN_RCVBUF;
static int max_skb_frags = MAX_SKB_FRAGS;
+static int min_mem_pcpu_rsv = SK_MEMORY_PCPU_RESERVE;
static int net_msg_warn; /* Unused, but still a sysctl */
@@ -137,7 +140,8 @@ static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
mutex_lock(&sock_flow_mutex);
- orig_sock_table = rcu_dereference_protected(rps_sock_flow_table,
+ orig_sock_table = rcu_dereference_protected(
+ net_hotdata.rps_sock_flow_table,
lockdep_is_held(&sock_flow_mutex));
size = orig_size = orig_sock_table ? orig_sock_table->mask + 1 : 0;
@@ -158,7 +162,8 @@ static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
mutex_unlock(&sock_flow_mutex);
return -ENOMEM;
}
- rps_cpu_mask = roundup_pow_of_two(nr_cpu_ids) - 1;
+ net_hotdata.rps_cpu_mask =
+ roundup_pow_of_two(nr_cpu_ids) - 1;
sock_table->mask = size - 1;
} else
sock_table = orig_sock_table;
@@ -169,7 +174,8 @@ static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
sock_table = NULL;
if (sock_table != orig_sock_table) {
- rcu_assign_pointer(rps_sock_flow_table, sock_table);
+ rcu_assign_pointer(net_hotdata.rps_sock_flow_table,
+ sock_table);
if (sock_table) {
static_branch_inc(&rps_needed);
static_branch_inc(&rfs_needed);
@@ -299,8 +305,8 @@ static int proc_do_dev_weight(struct ctl_table *table, int write,
ret = proc_dointvec(table, write, buffer, lenp, ppos);
if (!ret && write) {
weight = READ_ONCE(weight_p);
- WRITE_ONCE(dev_rx_weight, weight * dev_weight_rx_bias);
- WRITE_ONCE(dev_tx_weight, weight * dev_weight_tx_bias);
+ WRITE_ONCE(net_hotdata.dev_rx_weight, weight * dev_weight_rx_bias);
+ WRITE_ONCE(net_hotdata.dev_tx_weight, weight * dev_weight_tx_bias);
}
mutex_unlock(&dev_weight_mutex);
@@ -408,6 +414,14 @@ static struct ctl_table net_core_table[] = {
.extra1 = &min_rcvbuf,
},
{
+ .procname = "mem_pcpu_rsv",
+ .data = &sysctl_mem_pcpu_rsv,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &min_mem_pcpu_rsv,
+ },
+ {
.procname = "dev_weight",
.data = &weight_p,
.maxlen = sizeof(int),
@@ -430,7 +444,7 @@ static struct ctl_table net_core_table[] = {
},
{
.procname = "netdev_max_backlog",
- .data = &netdev_max_backlog,
+ .data = &net_hotdata.max_backlog,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec
@@ -489,7 +503,7 @@ static struct ctl_table net_core_table[] = {
#endif
{
.procname = "netdev_tstamp_prequeue",
- .data = &netdev_tstamp_prequeue,
+ .data = &net_hotdata.tstamp_prequeue,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec
@@ -567,7 +581,7 @@ static struct ctl_table net_core_table[] = {
#endif
{
.procname = "netdev_budget",
- .data = &netdev_budget,
+ .data = &net_hotdata.netdev_budget,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec
@@ -590,7 +604,7 @@ static struct ctl_table net_core_table[] = {
},
{
.procname = "netdev_budget_usecs",
- .data = &netdev_budget_usecs,
+ .data = &net_hotdata.netdev_budget_usecs,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
@@ -623,7 +637,7 @@ static struct ctl_table net_core_table[] = {
},
{
.procname = "gro_normal_batch",
- .data = &gro_normal_batch,
+ .data = &net_hotdata.gro_normal_batch,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
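
The new mem_pcpu_rsv entry follows the standard clamped-integer sysctl pattern: proc_dointvec_minmax() rejects writes below *extra1 (and above *extra2 when one is set), so the per-CPU reserve can be raised but never dropped under its compile-time default. An illustrative sketch with made-up names:

    static int demo_min = 16;
    static int demo_value = 256;

    static struct ctl_table demo_table[] = {
            {
                    .procname     = "demo_value",
                    .data         = &demo_value,
                    .maxlen       = sizeof(int),
                    .mode         = 0644,
                    .proc_handler = proc_dointvec_minmax,
                    .extra1       = &demo_min,  /* writes below 16 fail */
            },
            {}
    };
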
diff --git a/net/core/xdp.c b/net/core/xdp.c
index 4869c1c2d8f3..41693154e426 100644
--- a/net/core/xdp.c
+++ b/net/core/xdp.c
@@ -16,6 +16,7 @@
#include <linux/bug.h>
#include <net/page_pool/helpers.h>
+#include <net/hotdata.h>
#include <net/xdp.h>
#include <net/xdp_priv.h> /* struct xdp_mem_allocator */
#include <trace/events/xdp.h>
@@ -75,7 +76,7 @@ static void __xdp_mem_allocator_rcu_free(struct rcu_head *rcu)
xa = container_of(rcu, struct xdp_mem_allocator, rcu);
/* Allow this ID to be reused */
- ida_simple_remove(&mem_id_pool, xa->mem.id);
+ ida_free(&mem_id_pool, xa->mem.id);
kfree(xa);
}
@@ -242,7 +243,7 @@ static int __mem_id_cyclic_get(gfp_t gfp)
int id;
again:
- id = ida_simple_get(&mem_id_pool, mem_id_next, MEM_ID_MAX, gfp);
+ id = ida_alloc_range(&mem_id_pool, mem_id_next, MEM_ID_MAX - 1, gfp);
if (id < 0) {
if (id == -ENOSPC) {
/* Cyclic allocator, reset next id */
@@ -317,7 +318,7 @@ static struct xdp_mem_allocator *__xdp_reg_mem_model(struct xdp_mem_info *mem,
/* Insert allocator into ID lookup table */
ptr = rhashtable_insert_slow(mem_id_ht, &id, &xdp_alloc->node);
if (IS_ERR(ptr)) {
- ida_simple_remove(&mem_id_pool, mem->id);
+ ida_free(&mem_id_pool, mem->id);
mem->id = 0;
errno = PTR_ERR(ptr);
goto err;
@@ -589,7 +590,7 @@ EXPORT_SYMBOL_GPL(xdp_warn);
int xdp_alloc_skb_bulk(void **skbs, int n_skb, gfp_t gfp)
{
- n_skb = kmem_cache_alloc_bulk(skbuff_cache, gfp, n_skb, skbs);
+ n_skb = kmem_cache_alloc_bulk(net_hotdata.skbuff_cache, gfp, n_skb, skbs);
if (unlikely(!n_skb))
return -ENOMEM;
@@ -658,7 +659,7 @@ struct sk_buff *xdp_build_skb_from_frame(struct xdp_frame *xdpf,
{
struct sk_buff *skb;
- skb = kmem_cache_alloc(skbuff_cache, GFP_ATOMIC);
+ skb = kmem_cache_alloc(net_hotdata.skbuff_cache, GFP_ATOMIC);
if (unlikely(!skb))
return NULL;
@@ -771,11 +772,11 @@ __bpf_kfunc int bpf_xdp_metadata_rx_vlan_tag(const struct xdp_md *ctx,
__bpf_kfunc_end_defs();
-BTF_SET8_START(xdp_metadata_kfunc_ids)
+BTF_KFUNCS_START(xdp_metadata_kfunc_ids)
#define XDP_METADATA_KFUNC(_, __, name, ___) BTF_ID_FLAGS(func, name, KF_TRUSTED_ARGS)
XDP_METADATA_KFUNC_xxx
#undef XDP_METADATA_KFUNC
-BTF_SET8_END(xdp_metadata_kfunc_ids)
+BTF_KFUNCS_END(xdp_metadata_kfunc_ids)
static const struct btf_kfunc_id_set xdp_metadata_kfunc_set = {
.owner = THIS_MODULE,
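
The ida_simple_*() wrappers are deprecated in favor of ida_alloc_range()/ida_free(), and the bound semantics differ, which explains the MEM_ID_MAX - 1 above: ida_simple_get()'s end argument was exclusive, while ida_alloc_range()'s max is inclusive. Minimal usage sketch:

    static DEFINE_IDA(demo_ida);

    static int demo_get_id(void)
    {
            /* allocates an id in the inclusive range [1, 31];
             * returns -ENOSPC once the range is exhausted
             */
            return ida_alloc_range(&demo_ida, 1, 31, GFP_KERNEL);
    }

    static void demo_put_id(int id)
    {
            ida_free(&demo_ida, id);
    }
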
diff --git a/net/dccp/ackvec.c b/net/dccp/ackvec.c
index c4bbac99740d..1cba001bb4c8 100644
--- a/net/dccp/ackvec.c
+++ b/net/dccp/ackvec.c
@@ -376,15 +376,11 @@ EXPORT_SYMBOL_GPL(dccp_ackvec_parsed_cleanup);
int __init dccp_ackvec_init(void)
{
- dccp_ackvec_slab = kmem_cache_create("dccp_ackvec",
- sizeof(struct dccp_ackvec), 0,
- SLAB_HWCACHE_ALIGN, NULL);
+ dccp_ackvec_slab = KMEM_CACHE(dccp_ackvec, SLAB_HWCACHE_ALIGN);
if (dccp_ackvec_slab == NULL)
goto out_err;
- dccp_ackvec_record_slab = kmem_cache_create("dccp_ackvec_record",
- sizeof(struct dccp_ackvec_record),
- 0, SLAB_HWCACHE_ALIGN, NULL);
+ dccp_ackvec_record_slab = KMEM_CACHE(dccp_ackvec_record, SLAB_HWCACHE_ALIGN);
if (dccp_ackvec_record_slab == NULL)
goto out_destroy_slab;
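
KMEM_CACHE() removes the boilerplate by deriving the cache name, object size, and alignment from the struct itself; it expands to roughly the following (see include/linux/slab.h):

    #define KMEM_CACHE(__struct, __flags)                                 \
            kmem_cache_create(#__struct, sizeof(struct __struct),         \
                              __alignof__(struct __struct), (__flags), NULL)

The only argument that changes here is the alignment, from 0 to the struct's natural alignment, which SLAB_HWCACHE_ALIGN already exceeds, so behavior should be unchanged.
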
diff --git a/net/dccp/diag.c b/net/dccp/diag.c
index 8a82c5a2c5a8..f5019d95c3ae 100644
--- a/net/dccp/diag.c
+++ b/net/dccp/diag.c
@@ -58,6 +58,7 @@ static int dccp_diag_dump_one(struct netlink_callback *cb,
}
static const struct inet_diag_handler dccp_diag_handler = {
+ .owner = THIS_MODULE,
.dump = dccp_diag_dump,
.dump_one = dccp_diag_dump_one,
.idiag_get_info = dccp_diag_get_info,
diff --git a/net/devlink/netlink_gen.c b/net/devlink/netlink_gen.c
index c81cf2dd154f..f9786d51f68f 100644
--- a/net/devlink/netlink_gen.c
+++ b/net/devlink/netlink_gen.c
@@ -198,7 +198,7 @@ static const struct nla_policy devlink_eswitch_set_nl_policy[DEVLINK_ATTR_ESWITC
[DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
[DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
[DEVLINK_ATTR_ESWITCH_MODE] = NLA_POLICY_MAX(NLA_U16, 1),
- [DEVLINK_ATTR_ESWITCH_INLINE_MODE] = NLA_POLICY_MAX(NLA_U16, 3),
+ [DEVLINK_ATTR_ESWITCH_INLINE_MODE] = NLA_POLICY_MAX(NLA_U8, 3),
[DEVLINK_ATTR_ESWITCH_ENCAP_MODE] = NLA_POLICY_MAX(NLA_U8, 1),
};
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index ac7be864e80d..09d2f5d4b3dd 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -15,7 +15,6 @@
#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include <linux/of.h>
-#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <net/dsa_stubs.h>
#include <net/sch_generic.h>
@@ -626,7 +625,6 @@ static void dsa_switch_teardown_tag_protocol(struct dsa_switch *ds)
static int dsa_switch_setup(struct dsa_switch *ds)
{
- struct device_node *dn;
int err;
if (ds->setup)
@@ -666,10 +664,7 @@ static int dsa_switch_setup(struct dsa_switch *ds)
dsa_user_mii_bus_init(ds);
- dn = of_get_child_by_name(ds->dev->of_node, "mdio");
-
- err = of_mdiobus_register(ds->user_mii_bus, dn);
- of_node_put(dn);
+ err = mdiobus_register(ds->user_mii_bus);
if (err < 0)
goto free_user_mii_bus;
}
diff --git a/net/dsa/tag_sja1105.c b/net/dsa/tag_sja1105.c
index 2717e9d7b612..1aba1d05c27a 100644
--- a/net/dsa/tag_sja1105.c
+++ b/net/dsa/tag_sja1105.c
@@ -75,7 +75,7 @@ sja1105_tagger_private(struct dsa_switch *ds)
}
/* Similar to is_link_local_ether_addr(hdr->h_dest) but also covers PTP */
-static inline bool sja1105_is_link_local(const struct sk_buff *skb)
+static bool sja1105_is_link_local(const struct sk_buff *skb)
{
const struct ethhdr *hdr = eth_hdr(skb);
u64 dmac = ether_addr_to_u64(hdr->h_dest);
@@ -121,7 +121,7 @@ static void sja1105_meta_unpack(const struct sk_buff *skb,
packing(buf + 7, &meta->switch_id, 7, 0, 1, UNPACK, 0);
}
-static inline bool sja1105_is_meta_frame(const struct sk_buff *skb)
+static bool sja1105_is_meta_frame(const struct sk_buff *skb)
{
const struct ethhdr *hdr = eth_hdr(skb);
u64 smac = ether_addr_to_u64(hdr->h_source);
diff --git a/net/dsa/user.c b/net/dsa/user.c
index b15e71cc342c..16d395bb1a1f 100644
--- a/net/dsa/user.c
+++ b/net/dsa/user.c
@@ -210,7 +210,7 @@ static int dsa_user_sync_uc(struct net_device *dev,
return 0;
return dsa_user_vlan_for_each(dev, dsa_user_host_vlan_rx_filtering,
- &ctx);
+ &ctx);
}
static int dsa_user_unsync_uc(struct net_device *dev,
@@ -230,7 +230,7 @@ static int dsa_user_unsync_uc(struct net_device *dev,
return 0;
return dsa_user_vlan_for_each(dev, dsa_user_host_vlan_rx_filtering,
- &ctx);
+ &ctx);
}
static int dsa_user_sync_mc(struct net_device *dev,
@@ -250,7 +250,7 @@ static int dsa_user_sync_mc(struct net_device *dev,
return 0;
return dsa_user_vlan_for_each(dev, dsa_user_host_vlan_rx_filtering,
- &ctx);
+ &ctx);
}
static int dsa_user_unsync_mc(struct net_device *dev,
@@ -270,7 +270,7 @@ static int dsa_user_unsync_mc(struct net_device *dev,
return 0;
return dsa_user_vlan_for_each(dev, dsa_user_host_vlan_rx_filtering,
- &ctx);
+ &ctx);
}
void dsa_user_sync_ha(struct net_device *dev)
@@ -352,7 +352,7 @@ void dsa_user_mii_bus_init(struct dsa_switch *ds)
/* user device handling ****************************************************/
static int dsa_user_get_iflink(const struct net_device *dev)
{
- return dsa_user_to_conduit(dev)->ifindex;
+ return READ_ONCE(dsa_user_to_conduit(dev)->ifindex);
}
static int dsa_user_open(struct net_device *dev)
@@ -875,8 +875,8 @@ static int dsa_user_port_obj_del(struct net_device *dev, const void *ctx,
return err;
}
-static inline netdev_tx_t dsa_user_netpoll_send_skb(struct net_device *dev,
- struct sk_buff *skb)
+static netdev_tx_t dsa_user_netpoll_send_skb(struct net_device *dev,
+ struct sk_buff *skb)
{
#ifdef CONFIG_NET_POLL_CONTROLLER
struct dsa_user_priv *p = netdev_priv(dev);
@@ -1222,7 +1222,7 @@ static int dsa_user_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
return ret;
}
-static int dsa_user_set_eee(struct net_device *dev, struct ethtool_eee *e)
+static int dsa_user_set_eee(struct net_device *dev, struct ethtool_keee *e)
{
struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
@@ -1242,7 +1242,7 @@ static int dsa_user_set_eee(struct net_device *dev, struct ethtool_eee *e)
return phylink_ethtool_set_eee(dp->pl, e);
}
-static int dsa_user_get_eee(struct net_device *dev, struct ethtool_eee *e)
+static int dsa_user_get_eee(struct net_device *dev, struct ethtool_keee *e)
{
struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
@@ -2429,7 +2429,7 @@ static const struct net_device_ops dsa_user_netdev_ops = {
.ndo_fill_forward_path = dsa_user_fill_forward_path,
};
-static struct device_type dsa_type = {
+static const struct device_type dsa_type = {
.name = "dsa",
};
@@ -2625,11 +2625,7 @@ int dsa_user_create(struct dsa_port *port)
user_dev->vlan_features = conduit->vlan_features;
p = netdev_priv(user_dev);
- user_dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
- if (!user_dev->tstats) {
- free_netdev(user_dev);
- return -ENOMEM;
- }
+ user_dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
ret = gro_cells_init(&p->gcells, user_dev);
if (ret)
@@ -2695,7 +2691,6 @@ out_phy:
out_gcells:
gro_cells_destroy(&p->gcells);
out_free:
- free_percpu(user_dev->tstats);
free_netdev(user_dev);
port->user = NULL;
return ret;
@@ -2716,7 +2711,6 @@ void dsa_user_destroy(struct net_device *user_dev)
dsa_port_phylink_destroy(dp);
gro_cells_destroy(&p->gcells);
- free_percpu(user_dev->tstats);
free_netdev(user_dev);
}
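
Setting pcpu_stat_type before registration hands ownership of the per-CPU tstats to the core, which both allocates and frees them; that is why the manual netdev_alloc_pcpu_stats() and the free_percpu() calls in the error and destroy paths disappear together. A sketch of the resulting driver shape, with demo names:

    static int demo_create(struct net_device *dev)
    {
            dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
            return register_netdev(dev);    /* core allocates dev->tstats */
    }

    static netdev_tx_t demo_xmit(struct sk_buff *skb, struct net_device *dev)
    {
            dev_sw_netstats_tx_add(dev, 1, skb->len);  /* per-CPU counters */
            dev_kfree_skb(skb);
            return NETDEV_TX_OK;
    }
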
diff --git a/net/ethtool/eee.c b/net/ethtool/eee.c
index 2853394d06a8..bf398973eb8a 100644
--- a/net/ethtool/eee.c
+++ b/net/ethtool/eee.c
@@ -4,16 +4,13 @@
#include "common.h"
#include "bitset.h"
-#define EEE_MODES_COUNT \
- (sizeof_field(struct ethtool_eee, supported) * BITS_PER_BYTE)
-
struct eee_req_info {
struct ethnl_req_info base;
};
struct eee_reply_data {
struct ethnl_reply_data base;
- struct ethtool_eee eee;
+ struct ethtool_keee eee;
};
#define EEE_REPDATA(__reply_base) \
@@ -30,6 +27,7 @@ static int eee_prepare_data(const struct ethnl_req_info *req_base,
{
struct eee_reply_data *data = EEE_REPDATA(reply_base);
struct net_device *dev = reply_base->dev;
+ struct ethtool_keee *eee = &data->eee;
int ret;
if (!dev->ethtool_ops->get_eee)
@@ -37,7 +35,7 @@ static int eee_prepare_data(const struct ethnl_req_info *req_base,
ret = ethnl_ops_begin(dev);
if (ret < 0)
return ret;
- ret = dev->ethtool_ops->get_eee(dev, &data->eee);
+ ret = dev->ethtool_ops->get_eee(dev, eee);
ethnl_ops_complete(dev);
return ret;
@@ -48,24 +46,21 @@ static int eee_reply_size(const struct ethnl_req_info *req_base,
{
bool compact = req_base->flags & ETHTOOL_FLAG_COMPACT_BITSETS;
const struct eee_reply_data *data = EEE_REPDATA(reply_base);
- const struct ethtool_eee *eee = &data->eee;
+ const struct ethtool_keee *eee = &data->eee;
int len = 0;
int ret;
- BUILD_BUG_ON(sizeof(eee->advertised) * BITS_PER_BYTE !=
- EEE_MODES_COUNT);
- BUILD_BUG_ON(sizeof(eee->lp_advertised) * BITS_PER_BYTE !=
- EEE_MODES_COUNT);
-
/* MODES_OURS */
- ret = ethnl_bitset32_size(&eee->advertised, &eee->supported,
- EEE_MODES_COUNT, link_mode_names, compact);
+ ret = ethnl_bitset_size(eee->advertised, eee->supported,
+ __ETHTOOL_LINK_MODE_MASK_NBITS,
+ link_mode_names, compact);
if (ret < 0)
return ret;
len += ret;
/* MODES_PEERS */
- ret = ethnl_bitset32_size(&eee->lp_advertised, NULL,
- EEE_MODES_COUNT, link_mode_names, compact);
+ ret = ethnl_bitset_size(eee->lp_advertised, NULL,
+ __ETHTOOL_LINK_MODE_MASK_NBITS,
+ link_mode_names, compact);
if (ret < 0)
return ret;
len += ret;
@@ -84,24 +79,26 @@ static int eee_fill_reply(struct sk_buff *skb,
{
bool compact = req_base->flags & ETHTOOL_FLAG_COMPACT_BITSETS;
const struct eee_reply_data *data = EEE_REPDATA(reply_base);
- const struct ethtool_eee *eee = &data->eee;
+ const struct ethtool_keee *eee = &data->eee;
int ret;
- ret = ethnl_put_bitset32(skb, ETHTOOL_A_EEE_MODES_OURS,
- &eee->advertised, &eee->supported,
- EEE_MODES_COUNT, link_mode_names, compact);
+ ret = ethnl_put_bitset(skb, ETHTOOL_A_EEE_MODES_OURS,
+ eee->advertised, eee->supported,
+ __ETHTOOL_LINK_MODE_MASK_NBITS,
+ link_mode_names, compact);
if (ret < 0)
return ret;
- ret = ethnl_put_bitset32(skb, ETHTOOL_A_EEE_MODES_PEER,
- &eee->lp_advertised, NULL, EEE_MODES_COUNT,
- link_mode_names, compact);
+ ret = ethnl_put_bitset(skb, ETHTOOL_A_EEE_MODES_PEER,
+ eee->lp_advertised, NULL,
+ __ETHTOOL_LINK_MODE_MASK_NBITS,
+ link_mode_names, compact);
if (ret < 0)
return ret;
- if (nla_put_u8(skb, ETHTOOL_A_EEE_ACTIVE, !!eee->eee_active) ||
- nla_put_u8(skb, ETHTOOL_A_EEE_ENABLED, !!eee->eee_enabled) ||
+ if (nla_put_u8(skb, ETHTOOL_A_EEE_ACTIVE, eee->eee_active) ||
+ nla_put_u8(skb, ETHTOOL_A_EEE_ENABLED, eee->eee_enabled) ||
nla_put_u8(skb, ETHTOOL_A_EEE_TX_LPI_ENABLED,
- !!eee->tx_lpi_enabled) ||
+ eee->tx_lpi_enabled) ||
nla_put_u32(skb, ETHTOOL_A_EEE_TX_LPI_TIMER, eee->tx_lpi_timer))
return -EMSGSIZE;
@@ -132,7 +129,7 @@ ethnl_set_eee(struct ethnl_req_info *req_info, struct genl_info *info)
{
struct net_device *dev = req_info->dev;
struct nlattr **tb = info->attrs;
- struct ethtool_eee eee = {};
+ struct ethtool_keee eee = {};
bool mod = false;
int ret;
@@ -140,14 +137,15 @@ ethnl_set_eee(struct ethnl_req_info *req_info, struct genl_info *info)
if (ret < 0)
return ret;
- ret = ethnl_update_bitset32(&eee.advertised, EEE_MODES_COUNT,
- tb[ETHTOOL_A_EEE_MODES_OURS],
- link_mode_names, info->extack, &mod);
+ ret = ethnl_update_bitset(eee.advertised,
+ __ETHTOOL_LINK_MODE_MASK_NBITS,
+ tb[ETHTOOL_A_EEE_MODES_OURS],
+ link_mode_names, info->extack, &mod);
if (ret < 0)
return ret;
- ethnl_update_bool32(&eee.eee_enabled, tb[ETHTOOL_A_EEE_ENABLED], &mod);
- ethnl_update_bool32(&eee.tx_lpi_enabled,
- tb[ETHTOOL_A_EEE_TX_LPI_ENABLED], &mod);
+ ethnl_update_bool(&eee.eee_enabled, tb[ETHTOOL_A_EEE_ENABLED], &mod);
+ ethnl_update_bool(&eee.tx_lpi_enabled, tb[ETHTOOL_A_EEE_TX_LPI_ENABLED],
+ &mod);
ethnl_update_u32(&eee.tx_lpi_timer, tb[ETHTOOL_A_EEE_TX_LPI_TIMER],
&mod);
if (!mod)
diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c
index 7519b0818b91..5a55270aa86e 100644
--- a/net/ethtool/ioctl.c
+++ b/net/ethtool/ioctl.c
@@ -26,12 +26,12 @@
#include <linux/sched/signal.h>
#include <linux/net.h>
#include <linux/pm_runtime.h>
+#include <linux/utsname.h>
#include <net/devlink.h>
#include <net/ipv6.h>
#include <net/xdp_sock_drv.h>
#include <net/flow_offload.h>
#include <linux/ethtool_netlink.h>
-#include <generated/utsrelease.h>
#include "common.h"
/* State held across locks and calls for commands which have devlink fallback */
@@ -713,7 +713,8 @@ ethtool_get_drvinfo(struct net_device *dev, struct ethtool_devlink_compat *rsp)
struct device *parent = dev->dev.parent;
rsp->info.cmd = ETHTOOL_GDRVINFO;
- strscpy(rsp->info.version, UTS_RELEASE, sizeof(rsp->info.version));
+ strscpy(rsp->info.version, init_uts_ns.name.release,
+ sizeof(rsp->info.version));
if (ops->get_drvinfo) {
ops->get_drvinfo(dev, &rsp->info);
if (!rsp->info.bus_info[0] && parent)
@@ -1508,22 +1509,57 @@ static int ethtool_set_wol(struct net_device *dev, char __user *useraddr)
return 0;
}
+static void eee_to_keee(struct ethtool_keee *keee,
+ const struct ethtool_eee *eee)
+{
+ memset(keee, 0, sizeof(*keee));
+
+ keee->eee_enabled = eee->eee_enabled;
+ keee->tx_lpi_enabled = eee->tx_lpi_enabled;
+ keee->tx_lpi_timer = eee->tx_lpi_timer;
+
+ ethtool_convert_legacy_u32_to_link_mode(keee->advertised,
+ eee->advertised);
+}
+
+static void keee_to_eee(struct ethtool_eee *eee,
+ const struct ethtool_keee *keee)
+{
+ bool overflow;
+
+ memset(eee, 0, sizeof(*eee));
+
+ eee->eee_active = keee->eee_active;
+ eee->eee_enabled = keee->eee_enabled;
+ eee->tx_lpi_enabled = keee->tx_lpi_enabled;
+ eee->tx_lpi_timer = keee->tx_lpi_timer;
+
+ overflow = !ethtool_convert_link_mode_to_legacy_u32(&eee->supported,
+ keee->supported);
+ ethtool_convert_link_mode_to_legacy_u32(&eee->advertised,
+ keee->advertised);
+ ethtool_convert_link_mode_to_legacy_u32(&eee->lp_advertised,
+ keee->lp_advertised);
+ if (overflow)
+ pr_warn("Ethtool ioctl interface doesn't support passing EEE linkmodes beyond bit 32\n");
+}
+
static int ethtool_get_eee(struct net_device *dev, char __user *useraddr)
{
- struct ethtool_eee edata;
+ struct ethtool_keee keee;
+ struct ethtool_eee eee;
int rc;
if (!dev->ethtool_ops->get_eee)
return -EOPNOTSUPP;
- memset(&edata, 0, sizeof(struct ethtool_eee));
- edata.cmd = ETHTOOL_GEEE;
- rc = dev->ethtool_ops->get_eee(dev, &edata);
-
+ memset(&keee, 0, sizeof(keee));
+ rc = dev->ethtool_ops->get_eee(dev, &keee);
if (rc)
return rc;
- if (copy_to_user(useraddr, &edata, sizeof(edata)))
+ keee_to_eee(&eee, &keee);
+ if (copy_to_user(useraddr, &eee, sizeof(eee)))
return -EFAULT;
return 0;
@@ -1531,16 +1567,18 @@ static int ethtool_get_eee(struct net_device *dev, char __user *useraddr)
static int ethtool_set_eee(struct net_device *dev, char __user *useraddr)
{
- struct ethtool_eee edata;
+ struct ethtool_keee keee;
+ struct ethtool_eee eee;
int ret;
if (!dev->ethtool_ops->set_eee)
return -EOPNOTSUPP;
- if (copy_from_user(&edata, useraddr, sizeof(edata)))
+ if (copy_from_user(&eee, useraddr, sizeof(eee)))
return -EFAULT;
- ret = dev->ethtool_ops->set_eee(dev, &edata);
+ eee_to_keee(&keee, &eee);
+ ret = dev->ethtool_ops->set_eee(dev, &keee);
if (!ret)
ethtool_notify(dev, ETHTOOL_MSG_EEE_NTF, NULL);
return ret;
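
struct ethtool_keee carries full link-mode bitmaps where the legacy ioctl struct had u32 masks, so the ioctl path converts at the boundary. The reverse conversion returns false when any set bit lies above bit 31, which is what feeds the pr_warn() in keee_to_eee(). A small sketch:

    static u32 demo_modes_to_legacy(const unsigned long *modes)
    {
            u32 legacy;

            if (!ethtool_convert_link_mode_to_legacy_u32(&legacy, modes))
                    pr_warn("link modes above bit 31 cannot be represented\n");
            return legacy;
    }
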
diff --git a/net/ethtool/netlink.c b/net/ethtool/netlink.c
index fe3553f60bf3..bd04f28d5cf4 100644
--- a/net/ethtool/netlink.c
+++ b/net/ethtool/netlink.c
@@ -477,11 +477,7 @@ out:
return ret;
}
-/* Default ->dumpit() handler for GET requests. Device iteration copied from
- * rtnl_dump_ifinfo(); we have to be more careful about device hashtable
- * persistence as we cannot guarantee to hold RTNL lock through the whole
- * function as rtnetnlink does.
- */
+/* Default ->dumpit() handler for GET requests. */
static int ethnl_default_dumpit(struct sk_buff *skb,
struct netlink_callback *cb)
{
@@ -490,14 +486,14 @@ static int ethnl_default_dumpit(struct sk_buff *skb,
struct net_device *dev;
int ret = 0;
- rtnl_lock();
+ rcu_read_lock();
for_each_netdev_dump(net, dev, ctx->pos_ifindex) {
dev_hold(dev);
- rtnl_unlock();
+ rcu_read_unlock();
ret = ethnl_default_dump_one(skb, dev, ctx, genl_info_dump(cb));
- rtnl_lock();
+ rcu_read_lock();
dev_put(dev);
if (ret < 0 && ret != -EOPNOTSUPP) {
@@ -507,7 +503,7 @@ static int ethnl_default_dumpit(struct sk_buff *skb,
}
ret = 0;
}
- rtnl_unlock();
+ rcu_read_unlock();
return ret;
}
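
ethnl_default_dumpit() can drop RTNL because for_each_netdev_dump() iterates by ifindex from a cursor kept in the callback context, so the walk tolerates being suspended: pin the current device, leave the RCU section to run the (possibly sleeping) fill function, then re-enter and resume from the cursor. A condensed sketch of that shape; demo_dump_ctx and demo_fill_one are hypothetical stand-ins:

    struct demo_dump_ctx {
            unsigned long pos_ifindex;      /* resume cursor */
    };

    static int demo_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
    {
            struct demo_dump_ctx *ctx = (void *)cb->ctx;
            struct net_device *dev;
            int ret = 0;

            rcu_read_lock();
            for_each_netdev_dump(sock_net(skb->sk), dev, ctx->pos_ifindex) {
                    dev_hold(dev);
                    rcu_read_unlock();
                    ret = demo_fill_one(skb, dev);  /* may sleep */
                    rcu_read_lock();
                    dev_put(dev);
                    if (ret < 0)
                            break;
            }
            rcu_read_unlock();
            return ret;
    }
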
diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
index 9d71b66183da..c98b5b71ad7c 100644
--- a/net/hsr/hsr_device.c
+++ b/net/hsr/hsr_device.c
@@ -28,29 +28,19 @@ static bool is_slave_up(struct net_device *dev)
return dev && is_admin_up(dev) && netif_oper_up(dev);
}
-static void __hsr_set_operstate(struct net_device *dev, int transition)
-{
- write_lock(&dev_base_lock);
- if (dev->operstate != transition) {
- dev->operstate = transition;
- write_unlock(&dev_base_lock);
- netdev_state_change(dev);
- } else {
- write_unlock(&dev_base_lock);
- }
-}
-
static void hsr_set_operstate(struct hsr_port *master, bool has_carrier)
{
- if (!is_admin_up(master->dev)) {
- __hsr_set_operstate(master->dev, IF_OPER_DOWN);
+ struct net_device *dev = master->dev;
+
+ if (!is_admin_up(dev)) {
+ netdev_set_operstate(dev, IF_OPER_DOWN);
return;
}
if (has_carrier)
- __hsr_set_operstate(master->dev, IF_OPER_UP);
+ netdev_set_operstate(dev, IF_OPER_UP);
else
- __hsr_set_operstate(master->dev, IF_OPER_LOWERLAYERDOWN);
+ netdev_set_operstate(dev, IF_OPER_LOWERLAYERDOWN);
}
static bool hsr_check_carrier(struct hsr_port *master)
@@ -78,14 +68,14 @@ static void hsr_check_announce(struct net_device *hsr_dev,
hsr = netdev_priv(hsr_dev);
- if (hsr_dev->operstate == IF_OPER_UP && old_operstate != IF_OPER_UP) {
+ if (READ_ONCE(hsr_dev->operstate) == IF_OPER_UP && old_operstate != IF_OPER_UP) {
/* Went up */
hsr->announce_count = 0;
mod_timer(&hsr->announce_timer,
jiffies + msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL));
}
- if (hsr_dev->operstate != IF_OPER_UP && old_operstate == IF_OPER_UP)
+ if (READ_ONCE(hsr_dev->operstate) != IF_OPER_UP && old_operstate == IF_OPER_UP)
/* Went down */
del_timer(&hsr->announce_timer);
}
@@ -100,7 +90,7 @@ void hsr_check_carrier_and_operstate(struct hsr_priv *hsr)
/* netif_stacked_transfer_operstate() cannot be used here since
* it doesn't set IF_OPER_LOWERLAYERDOWN (?)
*/
- old_operstate = master->dev->operstate;
+ old_operstate = READ_ONCE(master->dev->operstate);
has_carrier = hsr_check_carrier(master);
hsr_set_operstate(master, has_carrier);
hsr_check_announce(master->dev, old_operstate);
@@ -477,7 +467,7 @@ static const struct net_device_ops hsr_device_ops = {
.ndo_set_rx_mode = hsr_set_rx_mode,
};
-static struct device_type hsr_type = {
+static const struct device_type hsr_type = {
.name = "hsr",
};
diff --git a/net/ieee802154/6lowpan/core.c b/net/ieee802154/6lowpan/core.c
index 2c087b7f17c5..77b4e92027c5 100644
--- a/net/ieee802154/6lowpan/core.c
+++ b/net/ieee802154/6lowpan/core.c
@@ -93,7 +93,7 @@ static int lowpan_neigh_construct(struct net_device *dev, struct neighbour *n)
static int lowpan_get_iflink(const struct net_device *dev)
{
- return lowpan_802154_dev(dev)->wdev->ifindex;
+ return READ_ONCE(lowpan_802154_dev(dev)->wdev->ifindex);
}
static const struct net_device_ops lowpan_netdev_ops = {
@@ -280,5 +280,6 @@ static void __exit lowpan_cleanup_module(void)
module_init(lowpan_init_module);
module_exit(lowpan_cleanup_module);
+MODULE_DESCRIPTION("IPv6 over Low power Wireless Personal Area Network IEEE 802.15.4 core");
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("lowpan");
diff --git a/net/ieee802154/socket.c b/net/ieee802154/socket.c
index 00302e8b9615..990a83455dcf 100644
--- a/net/ieee802154/socket.c
+++ b/net/ieee802154/socket.c
@@ -1137,4 +1137,5 @@ module_init(af_ieee802154_init);
module_exit(af_ieee802154_remove);
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("IEEE 802.15.4 socket interface");
MODULE_ALIAS_NETPROTO(PF_IEEE802154);
diff --git a/net/ieee802154/sysfs.c b/net/ieee802154/sysfs.c
index d2903933805c..6708160ebf9f 100644
--- a/net/ieee802154/sysfs.c
+++ b/net/ieee802154/sysfs.c
@@ -93,7 +93,7 @@ static SIMPLE_DEV_PM_OPS(wpan_phy_pm_ops, wpan_phy_suspend, wpan_phy_resume);
#define WPAN_PHY_PM_OPS NULL
#endif
-struct class wpan_phy_class = {
+const struct class wpan_phy_class = {
.name = "ieee802154",
.dev_release = wpan_phy_release,
.dev_groups = pmib_groups,
diff --git a/net/ieee802154/sysfs.h b/net/ieee802154/sysfs.h
index 337545b639e9..69961e166257 100644
--- a/net/ieee802154/sysfs.h
+++ b/net/ieee802154/sysfs.h
@@ -5,6 +5,6 @@
int wpan_phy_sysfs_init(void);
void wpan_phy_sysfs_exit(void);
-extern struct class wpan_phy_class;
+extern const struct class wpan_phy_class;
#endif /* __IEEE802154_SYSFS_H */
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index a5a820ee2026..55bd72997b31 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -119,6 +119,7 @@
#endif
#include <net/l3mdev.h>
#include <net/compat.h>
+#include <net/rps.h>
#include <trace/events/sock.h>
@@ -1103,7 +1104,7 @@ const struct proto_ops inet_dgram_ops = {
.recvmsg = inet_recvmsg,
.mmap = sock_no_mmap,
.splice_eof = inet_splice_eof,
- .set_peek_off = sk_set_peek_off,
+ .set_peek_off = udp_set_peek_off,
#ifdef CONFIG_COMPAT
.compat_ioctl = inet_compat_ioctl,
#endif
@@ -1326,7 +1327,7 @@ int inet_sk_rebuild_header(struct sock *sk)
fl4 = &inet->cork.fl.u.ip4;
rt = ip_route_output_ports(sock_net(sk), fl4, sk, daddr, inet->inet_saddr,
inet->inet_dport, inet->inet_sport,
- sk->sk_protocol, RT_CONN_FLAGS(sk),
+ sk->sk_protocol, ip_sock_rt_tos(sk),
sk->sk_bound_dev_if);
if (!IS_ERR(rt)) {
err = 0;
@@ -1751,19 +1752,6 @@ static const struct net_protocol igmp_protocol = {
};
#endif
-static const struct net_protocol tcp_protocol = {
- .handler = tcp_v4_rcv,
- .err_handler = tcp_v4_err,
- .no_policy = 1,
- .icmp_strict_tag_validation = 1,
-};
-
-static const struct net_protocol udp_protocol = {
- .handler = udp_rcv,
- .err_handler = udp_err,
- .no_policy = 1,
-};
-
static const struct net_protocol icmp_protocol = {
.handler = icmp_rcv,
.err_handler = icmp_err,
@@ -1904,14 +1892,6 @@ static int ipv4_proc_init(void);
* IP protocol layer initialiser
*/
-static struct packet_offload ip_packet_offload __read_mostly = {
- .type = cpu_to_be16(ETH_P_IP),
- .callbacks = {
- .gso_segment = inet_gso_segment,
- .gro_receive = inet_gro_receive,
- .gro_complete = inet_gro_complete,
- },
-};
static const struct net_offload ipip_offload = {
.callbacks = {
@@ -1938,7 +1918,15 @@ static int __init ipv4_offload_init(void)
if (ipip_offload_init() < 0)
pr_crit("%s: Cannot add IPIP protocol offload\n", __func__);
- dev_add_offload(&ip_packet_offload);
+ net_hotdata.ip_packet_offload = (struct packet_offload) {
+ .type = cpu_to_be16(ETH_P_IP),
+ .callbacks = {
+ .gso_segment = inet_gso_segment,
+ .gro_receive = inet_gro_receive,
+ .gro_complete = inet_gro_complete,
+ },
+ };
+ dev_add_offload(&net_hotdata.ip_packet_offload);
return 0;
}
@@ -1992,9 +1980,22 @@ static int __init inet_init(void)
if (inet_add_protocol(&icmp_protocol, IPPROTO_ICMP) < 0)
pr_crit("%s: Cannot add ICMP protocol\n", __func__);
- if (inet_add_protocol(&udp_protocol, IPPROTO_UDP) < 0)
+
+ net_hotdata.udp_protocol = (struct net_protocol) {
+ .handler = udp_rcv,
+ .err_handler = udp_err,
+ .no_policy = 1,
+ };
+ if (inet_add_protocol(&net_hotdata.udp_protocol, IPPROTO_UDP) < 0)
pr_crit("%s: Cannot add UDP protocol\n", __func__);
- if (inet_add_protocol(&tcp_protocol, IPPROTO_TCP) < 0)
+
+ net_hotdata.tcp_protocol = (struct net_protocol) {
+ .handler = tcp_v4_rcv,
+ .err_handler = tcp_v4_err,
+ .no_policy = 1,
+ .icmp_strict_tag_validation = 1,
+ };
+ if (inet_add_protocol(&net_hotdata.tcp_protocol, IPPROTO_TCP) < 0)
pr_crit("%s: Cannot add TCP protocol\n", __func__);
#ifdef CONFIG_IP_MULTICAST
if (inet_add_protocol(&igmp_protocol, IPPROTO_IGMP) < 0)
diff --git a/net/ipv4/bpf_tcp_ca.c b/net/ipv4/bpf_tcp_ca.c
index ae8b15e6896f..7f518ea5f4ac 100644
--- a/net/ipv4/bpf_tcp_ca.c
+++ b/net/ipv4/bpf_tcp_ca.c
@@ -12,7 +12,7 @@
#include <net/bpf_sk_storage.h>
/* "extern" is to avoid sparse warning. It is only used in bpf_struct_ops.c. */
-extern struct bpf_struct_ops bpf_tcp_congestion_ops;
+static struct bpf_struct_ops bpf_tcp_congestion_ops;
static u32 unsupported_ops[] = {
offsetof(struct tcp_congestion_ops, get_info),
@@ -20,6 +20,7 @@ static u32 unsupported_ops[] = {
static const struct btf_type *tcp_sock_type;
static u32 tcp_sock_id, sock_id;
+static const struct btf_type *tcp_congestion_ops_type;
static int bpf_tcp_ca_init(struct btf *btf)
{
@@ -36,6 +37,11 @@ static int bpf_tcp_ca_init(struct btf *btf)
tcp_sock_id = type_id;
tcp_sock_type = btf_type_by_id(btf, tcp_sock_id);
+ type_id = btf_find_by_name_kind(btf, "tcp_congestion_ops", BTF_KIND_STRUCT);
+ if (type_id < 0)
+ return -EINVAL;
+ tcp_congestion_ops_type = btf_type_by_id(btf, type_id);
+
return 0;
}
@@ -149,7 +155,7 @@ static u32 prog_ops_moff(const struct bpf_prog *prog)
u32 midx;
midx = prog->expected_attach_type;
- t = bpf_tcp_congestion_ops.type;
+ t = tcp_congestion_ops_type;
m = &btf_type_member(t)[midx];
return __btf_member_bit_offset(t, m) / 8;
@@ -191,17 +197,17 @@ bpf_tcp_ca_get_func_proto(enum bpf_func_id func_id,
case BPF_FUNC_ktime_get_coarse_ns:
return &bpf_ktime_get_coarse_ns_proto;
default:
- return bpf_base_func_proto(func_id);
+ return bpf_base_func_proto(func_id, prog);
}
}
-BTF_SET8_START(bpf_tcp_ca_check_kfunc_ids)
+BTF_KFUNCS_START(bpf_tcp_ca_check_kfunc_ids)
BTF_ID_FLAGS(func, tcp_reno_ssthresh)
BTF_ID_FLAGS(func, tcp_reno_cong_avoid)
BTF_ID_FLAGS(func, tcp_reno_undo_cwnd)
BTF_ID_FLAGS(func, tcp_slow_start)
BTF_ID_FLAGS(func, tcp_cong_avoid_ai)
-BTF_SET8_END(bpf_tcp_ca_check_kfunc_ids)
+BTF_KFUNCS_END(bpf_tcp_ca_check_kfunc_ids)
static const struct btf_kfunc_id_set bpf_tcp_ca_kfunc_set = {
.owner = THIS_MODULE,
@@ -339,7 +345,7 @@ static struct tcp_congestion_ops __bpf_ops_tcp_congestion_ops = {
.release = __bpf_tcp_ca_release,
};
-struct bpf_struct_ops bpf_tcp_congestion_ops = {
+static struct bpf_struct_ops bpf_tcp_congestion_ops = {
.verifier_ops = &bpf_tcp_ca_verifier_ops,
.reg = bpf_tcp_ca_reg,
.unreg = bpf_tcp_ca_unreg,
@@ -350,10 +356,16 @@ struct bpf_struct_ops bpf_tcp_congestion_ops = {
.validate = bpf_tcp_ca_validate,
.name = "tcp_congestion_ops",
.cfi_stubs = &__bpf_ops_tcp_congestion_ops,
+ .owner = THIS_MODULE,
};
static int __init bpf_tcp_ca_kfunc_init(void)
{
- return register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &bpf_tcp_ca_kfunc_set);
+ int ret;
+
+ ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &bpf_tcp_ca_kfunc_set);
+ ret = ret ?: register_bpf_struct_ops(&bpf_tcp_congestion_ops, tcp_congestion_ops);
+
+ return ret;
}
late_initcall(bpf_tcp_ca_kfunc_init);
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index d048aa833293..8b17d83e5fde 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -864,11 +864,8 @@ static int cipso_v4_map_cat_rbm_ntoh(const struct cipso_v4_doi *doi_def,
net_clen_bits,
net_spot + 1,
1);
- if (net_spot < 0) {
- if (net_spot == -2)
- return -EFAULT;
+ if (net_spot < 0)
return 0;
- }
switch (doi_def->type) {
case CIPSO_V4_MAP_PASS:
diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c
index 2cc50cbfc2a3..cc6d0bd7b0a9 100644
--- a/net/ipv4/datagram.c
+++ b/net/ipv4/datagram.c
@@ -119,7 +119,7 @@ void ip4_datagram_release_cb(struct sock *sk)
rt = ip_route_output_ports(sock_net(sk), &fl4, sk, daddr,
inet->inet_saddr, inet->inet_dport,
inet->inet_sport, sk->sk_protocol,
- RT_CONN_FLAGS(sk), sk->sk_bound_dev_if);
+ ip_sock_rt_tos(sk), sk->sk_bound_dev_if);
dst = !IS_ERR(rt) ? &rt->dst : NULL;
sk_dst_set(sk, dst);
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index bc74f131fe4d..7a437f0d4190 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -713,34 +713,37 @@ static void check_lifetime(struct work_struct *work)
rcu_read_lock();
hlist_for_each_entry_rcu(ifa, &inet_addr_lst[i], hash) {
- unsigned long age;
+ unsigned long age, tstamp;
+ u32 preferred_lft;
+ u32 valid_lft;
+ u32 flags;
- if (ifa->ifa_flags & IFA_F_PERMANENT)
+ flags = READ_ONCE(ifa->ifa_flags);
+ if (flags & IFA_F_PERMANENT)
continue;
+ preferred_lft = READ_ONCE(ifa->ifa_preferred_lft);
+ valid_lft = READ_ONCE(ifa->ifa_valid_lft);
+ tstamp = READ_ONCE(ifa->ifa_tstamp);
/* We try to batch several events at once. */
- age = (now - ifa->ifa_tstamp +
+ age = (now - tstamp +
ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
- if (ifa->ifa_valid_lft != INFINITY_LIFE_TIME &&
- age >= ifa->ifa_valid_lft) {
+ if (valid_lft != INFINITY_LIFE_TIME &&
+ age >= valid_lft) {
change_needed = true;
- } else if (ifa->ifa_preferred_lft ==
+ } else if (preferred_lft ==
INFINITY_LIFE_TIME) {
continue;
- } else if (age >= ifa->ifa_preferred_lft) {
- if (time_before(ifa->ifa_tstamp +
- ifa->ifa_valid_lft * HZ, next))
- next = ifa->ifa_tstamp +
- ifa->ifa_valid_lft * HZ;
+ } else if (age >= preferred_lft) {
+ if (time_before(tstamp + valid_lft * HZ, next))
+ next = tstamp + valid_lft * HZ;
- if (!(ifa->ifa_flags & IFA_F_DEPRECATED))
+ if (!(flags & IFA_F_DEPRECATED))
change_needed = true;
- } else if (time_before(ifa->ifa_tstamp +
- ifa->ifa_preferred_lft * HZ,
+ } else if (time_before(tstamp + preferred_lft * HZ,
next)) {
- next = ifa->ifa_tstamp +
- ifa->ifa_preferred_lft * HZ;
+ next = tstamp + preferred_lft * HZ;
}
}
rcu_read_unlock();
@@ -804,24 +807,26 @@ static void set_ifa_lifetime(struct in_ifaddr *ifa, __u32 valid_lft,
__u32 prefered_lft)
{
unsigned long timeout;
+ u32 flags;
- ifa->ifa_flags &= ~(IFA_F_PERMANENT | IFA_F_DEPRECATED);
+ flags = ifa->ifa_flags & ~(IFA_F_PERMANENT | IFA_F_DEPRECATED);
timeout = addrconf_timeout_fixup(valid_lft, HZ);
if (addrconf_finite_timeout(timeout))
- ifa->ifa_valid_lft = timeout;
+ WRITE_ONCE(ifa->ifa_valid_lft, timeout);
else
- ifa->ifa_flags |= IFA_F_PERMANENT;
+ flags |= IFA_F_PERMANENT;
timeout = addrconf_timeout_fixup(prefered_lft, HZ);
if (addrconf_finite_timeout(timeout)) {
if (timeout == 0)
- ifa->ifa_flags |= IFA_F_DEPRECATED;
- ifa->ifa_preferred_lft = timeout;
+ flags |= IFA_F_DEPRECATED;
+ WRITE_ONCE(ifa->ifa_preferred_lft, timeout);
}
- ifa->ifa_tstamp = jiffies;
+ WRITE_ONCE(ifa->ifa_flags, flags);
+ WRITE_ONCE(ifa->ifa_tstamp, jiffies);
if (!ifa->ifa_cstamp)
- ifa->ifa_cstamp = ifa->ifa_tstamp;
+ WRITE_ONCE(ifa->ifa_cstamp, ifa->ifa_tstamp);
}
static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh,
@@ -1312,7 +1317,7 @@ static __be32 in_dev_select_addr(const struct in_device *in_dev,
const struct in_ifaddr *ifa;
in_dev_for_each_ifa_rcu(ifa, in_dev) {
- if (ifa->ifa_flags & IFA_F_SECONDARY)
+ if (READ_ONCE(ifa->ifa_flags) & IFA_F_SECONDARY)
continue;
if (ifa->ifa_scope != RT_SCOPE_LINK &&
ifa->ifa_scope <= scope)
@@ -1340,7 +1345,7 @@ __be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope)
localnet_scope = RT_SCOPE_LINK;
in_dev_for_each_ifa_rcu(ifa, in_dev) {
- if (ifa->ifa_flags & IFA_F_SECONDARY)
+ if (READ_ONCE(ifa->ifa_flags) & IFA_F_SECONDARY)
continue;
if (min(ifa->ifa_scope, localnet_scope) > scope)
continue;
@@ -1671,11 +1676,12 @@ static int put_cacheinfo(struct sk_buff *skb, unsigned long cstamp,
return nla_put(skb, IFA_CACHEINFO, sizeof(ci), &ci);
}
-static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa,
+static int inet_fill_ifaddr(struct sk_buff *skb, const struct in_ifaddr *ifa,
struct inet_fill_args *args)
{
struct ifaddrmsg *ifm;
struct nlmsghdr *nlh;
+ unsigned long tstamp;
u32 preferred, valid;
nlh = nlmsg_put(skb, args->portid, args->seq, args->event, sizeof(*ifm),
@@ -1686,7 +1692,7 @@ static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa,
ifm = nlmsg_data(nlh);
ifm->ifa_family = AF_INET;
ifm->ifa_prefixlen = ifa->ifa_prefixlen;
- ifm->ifa_flags = ifa->ifa_flags;
+ ifm->ifa_flags = READ_ONCE(ifa->ifa_flags);
ifm->ifa_scope = ifa->ifa_scope;
ifm->ifa_index = ifa->ifa_dev->dev->ifindex;
@@ -1694,11 +1700,12 @@ static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa,
nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid))
goto nla_put_failure;
+ tstamp = READ_ONCE(ifa->ifa_tstamp);
if (!(ifm->ifa_flags & IFA_F_PERMANENT)) {
- preferred = ifa->ifa_preferred_lft;
- valid = ifa->ifa_valid_lft;
+ preferred = READ_ONCE(ifa->ifa_preferred_lft);
+ valid = READ_ONCE(ifa->ifa_valid_lft);
if (preferred != INFINITY_LIFE_TIME) {
- long tval = (jiffies - ifa->ifa_tstamp) / HZ;
+ long tval = (jiffies - tstamp) / HZ;
if (preferred > tval)
preferred -= tval;
@@ -1725,10 +1732,10 @@ static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa,
nla_put_string(skb, IFA_LABEL, ifa->ifa_label)) ||
(ifa->ifa_proto &&
nla_put_u8(skb, IFA_PROTO, ifa->ifa_proto)) ||
- nla_put_u32(skb, IFA_FLAGS, ifa->ifa_flags) ||
+ nla_put_u32(skb, IFA_FLAGS, ifm->ifa_flags) ||
(ifa->ifa_rt_priority &&
nla_put_u32(skb, IFA_RT_PRIORITY, ifa->ifa_rt_priority)) ||
- put_cacheinfo(skb, ifa->ifa_cstamp, ifa->ifa_tstamp,
+ put_cacheinfo(skb, READ_ONCE(ifa->ifa_cstamp), tstamp,
preferred, valid))
goto nla_put_failure;
@@ -1798,15 +1805,15 @@ static int inet_valid_dump_ifaddr_req(const struct nlmsghdr *nlh,
}
static int in_dev_dump_addr(struct in_device *in_dev, struct sk_buff *skb,
- struct netlink_callback *cb, int s_ip_idx,
+ struct netlink_callback *cb, int *s_ip_idx,
struct inet_fill_args *fillargs)
{
struct in_ifaddr *ifa;
int ip_idx = 0;
int err;
- in_dev_for_each_ifa_rtnl(ifa, in_dev) {
- if (ip_idx < s_ip_idx) {
+ in_dev_for_each_ifa_rcu(ifa, in_dev) {
+ if (ip_idx < *s_ip_idx) {
ip_idx++;
continue;
}
@@ -1818,9 +1825,9 @@ static int in_dev_dump_addr(struct in_device *in_dev, struct sk_buff *skb,
ip_idx++;
}
err = 0;
-
+ ip_idx = 0;
done:
- cb->args[2] = ip_idx;
+ *s_ip_idx = ip_idx;
return err;
}
@@ -1830,7 +1837,7 @@ done:
static u32 inet_base_seq(const struct net *net)
{
u32 res = atomic_read(&net->ipv4.dev_addr_genid) +
- net->dev_base_seq;
+ READ_ONCE(net->dev_base_seq);
/* Must not return 0 (see nl_dump_check_consistent()).
* Chose a value far away from 0.
@@ -1852,75 +1859,51 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
};
struct net *net = sock_net(skb->sk);
struct net *tgt_net = net;
- int h, s_h;
- int idx, s_idx;
- int s_ip_idx;
- struct net_device *dev;
+ struct {
+ unsigned long ifindex;
+ int ip_idx;
+ } *ctx = (void *)cb->ctx;
struct in_device *in_dev;
- struct hlist_head *head;
+ struct net_device *dev;
int err = 0;
- s_h = cb->args[0];
- s_idx = idx = cb->args[1];
- s_ip_idx = cb->args[2];
-
+ rcu_read_lock();
if (cb->strict_check) {
err = inet_valid_dump_ifaddr_req(nlh, &fillargs, &tgt_net,
skb->sk, cb);
if (err < 0)
- goto put_tgt_net;
+ goto done;
- err = 0;
if (fillargs.ifindex) {
- dev = __dev_get_by_index(tgt_net, fillargs.ifindex);
- if (!dev) {
- err = -ENODEV;
- goto put_tgt_net;
- }
-
- in_dev = __in_dev_get_rtnl(dev);
- if (in_dev) {
- err = in_dev_dump_addr(in_dev, skb, cb, s_ip_idx,
- &fillargs);
- }
- goto put_tgt_net;
- }
- }
-
- for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
- idx = 0;
- head = &tgt_net->dev_index_head[h];
- rcu_read_lock();
- cb->seq = inet_base_seq(tgt_net);
- hlist_for_each_entry_rcu(dev, head, index_hlist) {
- if (idx < s_idx)
- goto cont;
- if (h > s_h || idx > s_idx)
- s_ip_idx = 0;
+ err = -ENODEV;
+ dev = dev_get_by_index_rcu(tgt_net, fillargs.ifindex);
+ if (!dev)
+ goto done;
in_dev = __in_dev_get_rcu(dev);
if (!in_dev)
- goto cont;
-
- err = in_dev_dump_addr(in_dev, skb, cb, s_ip_idx,
- &fillargs);
- if (err < 0) {
- rcu_read_unlock();
goto done;
- }
-cont:
- idx++;
+ err = in_dev_dump_addr(in_dev, skb, cb, &ctx->ip_idx,
+ &fillargs);
+ goto done;
}
- rcu_read_unlock();
}
+ cb->seq = inet_base_seq(tgt_net);
+
+ for_each_netdev_dump(net, dev, ctx->ifindex) {
+ in_dev = __in_dev_get_rcu(dev);
+ if (!in_dev)
+ continue;
+ err = in_dev_dump_addr(in_dev, skb, cb, &ctx->ip_idx,
+ &fillargs);
+ if (err < 0)
+ goto done;
+ }
done:
- cb->args[0] = h;
- cb->args[1] = idx;
-put_tgt_net:
if (fillargs.netnsid >= 0)
put_net(tgt_net);
-
- return skb->len ? : err;
+ rcu_read_unlock();
+ return err;
}
static void rtmsg_ifa(int event, struct in_ifaddr *ifa, struct nlmsghdr *nlh,
@@ -1982,7 +1965,7 @@ static int inet_fill_link_af(struct sk_buff *skb, const struct net_device *dev,
return -EMSGSIZE;
for (i = 0; i < IPV4_DEVCONF_MAX; i++)
- ((u32 *) nla_data(nla))[i] = in_dev->cnf.data[i];
+ ((u32 *) nla_data(nla))[i] = READ_ONCE(in_dev->cnf.data[i]);
return 0;
}
@@ -2068,9 +2051,9 @@ static int inet_netconf_msgsize_devconf(int type)
}
static int inet_netconf_fill_devconf(struct sk_buff *skb, int ifindex,
- struct ipv4_devconf *devconf, u32 portid,
- u32 seq, int event, unsigned int flags,
- int type)
+ const struct ipv4_devconf *devconf,
+ u32 portid, u32 seq, int event,
+ unsigned int flags, int type)
{
struct nlmsghdr *nlh;
struct netconfmsg *ncm;
@@ -2095,27 +2078,28 @@ static int inet_netconf_fill_devconf(struct sk_buff *skb, int ifindex,
if ((all || type == NETCONFA_FORWARDING) &&
nla_put_s32(skb, NETCONFA_FORWARDING,
- IPV4_DEVCONF(*devconf, FORWARDING)) < 0)
+ IPV4_DEVCONF_RO(*devconf, FORWARDING)) < 0)
goto nla_put_failure;
if ((all || type == NETCONFA_RP_FILTER) &&
nla_put_s32(skb, NETCONFA_RP_FILTER,
- IPV4_DEVCONF(*devconf, RP_FILTER)) < 0)
+ IPV4_DEVCONF_RO(*devconf, RP_FILTER)) < 0)
goto nla_put_failure;
if ((all || type == NETCONFA_MC_FORWARDING) &&
nla_put_s32(skb, NETCONFA_MC_FORWARDING,
- IPV4_DEVCONF(*devconf, MC_FORWARDING)) < 0)
+ IPV4_DEVCONF_RO(*devconf, MC_FORWARDING)) < 0)
goto nla_put_failure;
if ((all || type == NETCONFA_BC_FORWARDING) &&
nla_put_s32(skb, NETCONFA_BC_FORWARDING,
- IPV4_DEVCONF(*devconf, BC_FORWARDING)) < 0)
+ IPV4_DEVCONF_RO(*devconf, BC_FORWARDING)) < 0)
goto nla_put_failure;
if ((all || type == NETCONFA_PROXY_NEIGH) &&
nla_put_s32(skb, NETCONFA_PROXY_NEIGH,
- IPV4_DEVCONF(*devconf, PROXY_ARP)) < 0)
+ IPV4_DEVCONF_RO(*devconf, PROXY_ARP)) < 0)
goto nla_put_failure;
if ((all || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN) &&
nla_put_s32(skb, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
- IPV4_DEVCONF(*devconf, IGNORE_ROUTES_WITH_LINKDOWN)) < 0)
+ IPV4_DEVCONF_RO(*devconf,
+ IGNORE_ROUTES_WITH_LINKDOWN)) < 0)
goto nla_put_failure;
out:
@@ -2204,21 +2188,20 @@ static int inet_netconf_get_devconf(struct sk_buff *in_skb,
struct netlink_ext_ack *extack)
{
struct net *net = sock_net(in_skb->sk);
- struct nlattr *tb[NETCONFA_MAX+1];
+ struct nlattr *tb[NETCONFA_MAX + 1];
+ const struct ipv4_devconf *devconf;
+ struct in_device *in_dev = NULL;
+ struct net_device *dev = NULL;
struct sk_buff *skb;
- struct ipv4_devconf *devconf;
- struct in_device *in_dev;
- struct net_device *dev;
int ifindex;
int err;
err = inet_netconf_valid_get_req(in_skb, nlh, tb, extack);
if (err)
- goto errout;
+ return err;
- err = -EINVAL;
if (!tb[NETCONFA_IFINDEX])
- goto errout;
+ return -EINVAL;
ifindex = nla_get_s32(tb[NETCONFA_IFINDEX]);
switch (ifindex) {
@@ -2229,10 +2212,10 @@ static int inet_netconf_get_devconf(struct sk_buff *in_skb,
devconf = net->ipv4.devconf_dflt;
break;
default:
- dev = __dev_get_by_index(net, ifindex);
- if (!dev)
- goto errout;
- in_dev = __in_dev_get_rtnl(dev);
+ err = -ENODEV;
+ dev = dev_get_by_index(net, ifindex);
+ if (dev)
+ in_dev = in_dev_get(dev);
if (!in_dev)
goto errout;
devconf = &in_dev->cnf;
@@ -2256,6 +2239,9 @@ static int inet_netconf_get_devconf(struct sk_buff *in_skb,
}
err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
errout:
+ if (in_dev)
+ in_dev_put(in_dev);
+ dev_put(dev);
return err;
}
@@ -2264,11 +2250,13 @@ static int inet_netconf_dump_devconf(struct sk_buff *skb,
{
const struct nlmsghdr *nlh = cb->nlh;
struct net *net = sock_net(skb->sk);
- int h, s_h;
- int idx, s_idx;
+ struct {
+ unsigned long ifindex;
+ unsigned int all_default;
+ } *ctx = (void *)cb->ctx;
+ const struct in_device *in_dev;
struct net_device *dev;
- struct in_device *in_dev;
- struct hlist_head *head;
+ int err = 0;
if (cb->strict_check) {
struct netlink_ext_ack *extack = cb->extack;
@@ -2285,64 +2273,45 @@ static int inet_netconf_dump_devconf(struct sk_buff *skb,
}
}
- s_h = cb->args[0];
- s_idx = idx = cb->args[1];
-
- for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
- idx = 0;
- head = &net->dev_index_head[h];
- rcu_read_lock();
- cb->seq = inet_base_seq(net);
- hlist_for_each_entry_rcu(dev, head, index_hlist) {
- if (idx < s_idx)
- goto cont;
- in_dev = __in_dev_get_rcu(dev);
- if (!in_dev)
- goto cont;
-
- if (inet_netconf_fill_devconf(skb, dev->ifindex,
- &in_dev->cnf,
- NETLINK_CB(cb->skb).portid,
- nlh->nlmsg_seq,
- RTM_NEWNETCONF,
- NLM_F_MULTI,
- NETCONFA_ALL) < 0) {
- rcu_read_unlock();
- goto done;
- }
- nl_dump_check_consistent(cb, nlmsg_hdr(skb));
-cont:
- idx++;
- }
- rcu_read_unlock();
+ rcu_read_lock();
+ for_each_netdev_dump(net, dev, ctx->ifindex) {
+ in_dev = __in_dev_get_rcu(dev);
+ if (!in_dev)
+ continue;
+ err = inet_netconf_fill_devconf(skb, dev->ifindex,
+ &in_dev->cnf,
+ NETLINK_CB(cb->skb).portid,
+ nlh->nlmsg_seq,
+ RTM_NEWNETCONF, NLM_F_MULTI,
+ NETCONFA_ALL);
+ if (err < 0)
+ goto done;
}
- if (h == NETDEV_HASHENTRIES) {
- if (inet_netconf_fill_devconf(skb, NETCONFA_IFINDEX_ALL,
- net->ipv4.devconf_all,
- NETLINK_CB(cb->skb).portid,
- nlh->nlmsg_seq,
- RTM_NEWNETCONF, NLM_F_MULTI,
- NETCONFA_ALL) < 0)
+ if (ctx->all_default == 0) {
+ err = inet_netconf_fill_devconf(skb, NETCONFA_IFINDEX_ALL,
+ net->ipv4.devconf_all,
+ NETLINK_CB(cb->skb).portid,
+ nlh->nlmsg_seq,
+ RTM_NEWNETCONF, NLM_F_MULTI,
+ NETCONFA_ALL);
+ if (err < 0)
goto done;
- else
- h++;
- }
- if (h == NETDEV_HASHENTRIES + 1) {
- if (inet_netconf_fill_devconf(skb, NETCONFA_IFINDEX_DEFAULT,
- net->ipv4.devconf_dflt,
- NETLINK_CB(cb->skb).portid,
- nlh->nlmsg_seq,
- RTM_NEWNETCONF, NLM_F_MULTI,
- NETCONFA_ALL) < 0)
+ ctx->all_default++;
+ }
+ if (ctx->all_default == 1) {
+ err = inet_netconf_fill_devconf(skb, NETCONFA_IFINDEX_DEFAULT,
+ net->ipv4.devconf_dflt,
+ NETLINK_CB(cb->skb).portid,
+ nlh->nlmsg_seq,
+ RTM_NEWNETCONF, NLM_F_MULTI,
+ NETCONFA_ALL);
+ if (err < 0)
goto done;
- else
- h++;
+ ctx->all_default++;
}
done:
- cb->args[0] = h;
- cb->args[1] = idx;
-
- return skb->len;
+ rcu_read_unlock();
+ return err;
}
#ifdef CONFIG_SYSCTL
@@ -2823,7 +2792,9 @@ void __init devinet_init(void)
rtnl_register(PF_INET, RTM_NEWADDR, inet_rtm_newaddr, NULL, 0);
rtnl_register(PF_INET, RTM_DELADDR, inet_rtm_deladdr, NULL, 0);
- rtnl_register(PF_INET, RTM_GETADDR, NULL, inet_dump_ifaddr, 0);
+ rtnl_register(PF_INET, RTM_GETADDR, NULL, inet_dump_ifaddr,
+ RTNL_FLAG_DUMP_UNLOCKED);
rtnl_register(PF_INET, RTM_GETNETCONF, inet_netconf_get_devconf,
- inet_netconf_dump_devconf, 0);
+ inet_netconf_dump_devconf,
+ RTNL_FLAG_DOIT_UNLOCKED | RTNL_FLAG_DUMP_UNLOCKED);
}
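
The devinet changes above replace the cb->args[] hash-bucket walk with for_each_netdev_dump(), keeping resume state in cb->ctx so the dump can run under RCU alone and restart by ifindex. A minimal sketch of that dump shape, assuming kernel context and a hypothetical fill_one() helper:

    static int example_dump(struct sk_buff *skb, struct netlink_callback *cb)
    {
            struct {
                    unsigned long ifindex;  /* resume point across dump calls */
            } *ctx = (void *)cb->ctx;
            struct net *net = sock_net(skb->sk);
            struct net_device *dev;
            int err = 0;

            rcu_read_lock();
            for_each_netdev_dump(net, dev, ctx->ifindex) {
                    err = fill_one(skb, dev, cb);   /* hypothetical fill */
                    if (err < 0)
                            break;  /* next invocation resumes at ctx->ifindex */
            }
            rcu_read_unlock();
            return err;
    }
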
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 390f4be7f7be..48741352a88a 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -916,7 +916,8 @@ int ip_valid_fib_dump_req(struct net *net, const struct nlmsghdr *nlh,
struct rtmsg *rtm;
int err, i;
- ASSERT_RTNL();
+ if (filter->rtnl_held)
+ ASSERT_RTNL();
if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*rtm))) {
NL_SET_ERR_MSG(extack, "Invalid header for FIB dump request");
@@ -961,7 +962,10 @@ int ip_valid_fib_dump_req(struct net *net, const struct nlmsghdr *nlh,
break;
case RTA_OIF:
ifindex = nla_get_u32(tb[i]);
- filter->dev = __dev_get_by_index(net, ifindex);
+ if (filter->rtnl_held)
+ filter->dev = __dev_get_by_index(net, ifindex);
+ else
+ filter->dev = dev_get_by_index_rcu(net, ifindex);
if (!filter->dev)
return -ENODEV;
break;
@@ -983,20 +987,24 @@ EXPORT_SYMBOL_GPL(ip_valid_fib_dump_req);
static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
{
- struct fib_dump_filter filter = { .dump_routes = true,
- .dump_exceptions = true };
+ struct fib_dump_filter filter = {
+ .dump_routes = true,
+ .dump_exceptions = true,
+ .rtnl_held = false,
+ };
const struct nlmsghdr *nlh = cb->nlh;
struct net *net = sock_net(skb->sk);
unsigned int h, s_h;
unsigned int e = 0, s_e;
struct fib_table *tb;
struct hlist_head *head;
- int dumped = 0, err;
+ int dumped = 0, err = 0;
+ rcu_read_lock();
if (cb->strict_check) {
err = ip_valid_fib_dump_req(net, nlh, &filter, cb);
if (err < 0)
- return err;
+ goto unlock;
} else if (nlmsg_len(nlh) >= sizeof(struct rtmsg)) {
struct rtmsg *rtm = nlmsg_data(nlh);
@@ -1005,29 +1013,26 @@ static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
/* ipv4 does not use prefix flag */
if (filter.flags & RTM_F_PREFIX)
- return skb->len;
+ goto unlock;
if (filter.table_id) {
tb = fib_get_table(net, filter.table_id);
if (!tb) {
if (rtnl_msg_family(cb->nlh) != PF_INET)
- return skb->len;
+ goto unlock;
NL_SET_ERR_MSG(cb->extack, "ipv4: FIB table does not exist");
- return -ENOENT;
+ err = -ENOENT;
+ goto unlock;
}
-
- rcu_read_lock();
err = fib_table_dump(tb, skb, cb, &filter);
- rcu_read_unlock();
- return skb->len ? : err;
+ goto unlock;
}
s_h = cb->args[0];
s_e = cb->args[1];
- rcu_read_lock();
-
+ err = 0;
for (h = s_h; h < FIB_TABLE_HASHSZ; h++, s_e = 0) {
e = 0;
head = &net->ipv4.fib_table_hash[h];
@@ -1038,25 +1043,20 @@ static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
memset(&cb->args[2], 0, sizeof(cb->args) -
2 * sizeof(cb->args[0]));
err = fib_table_dump(tb, skb, cb, &filter);
- if (err < 0) {
- if (likely(skb->len))
- goto out;
-
- goto out_err;
- }
+ if (err < 0)
+ goto out;
dumped = 1;
next:
e++;
}
}
out:
- err = skb->len;
-out_err:
- rcu_read_unlock();
cb->args[1] = e;
cb->args[0] = h;
+unlock:
+ rcu_read_unlock();
return err;
}
@@ -1659,5 +1659,6 @@ void __init ip_fib_init(void)
rtnl_register(PF_INET, RTM_NEWROUTE, inet_rtm_newroute, NULL, 0);
rtnl_register(PF_INET, RTM_DELROUTE, inet_rtm_delroute, NULL, 0);
- rtnl_register(PF_INET, RTM_GETROUTE, NULL, inet_dump_fib, 0);
+ rtnl_register(PF_INET, RTM_GETROUTE, NULL, inet_dump_fib,
+ RTNL_FLAG_DUMP_UNLOCKED);
}
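
Two conventions change together in this file: dump callbacks now return the raw error instead of the skb->len ? : err idiom (the core consults skb->len itself), and registering with RTNL_FLAG_DUMP_UNLOCKED tells the core not to take RTNL around the dump, so inet_dump_fib() must hold rcu_read_lock() across the whole walk. In sketch form:

    /* old: return skb->len ? : err;  -- mask the error once data was queued
     * new: return err;               -- the netlink core checks skb->len
     *
     * The unlocked dump is opted into at registration time:
     */
    rtnl_register(PF_INET, RTM_GETROUTE, NULL, inet_dump_fib,
                  RTNL_FLAG_DUMP_UNLOCKED);
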
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 3ff35f811765..f474106464d2 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -501,7 +501,7 @@ static void tnode_free(struct key_vector *tn)
if (tnode_free_size >= READ_ONCE(sysctl_fib_sync_mem)) {
tnode_free_size = 0;
- synchronize_rcu();
+ synchronize_net();
}
}
@@ -2368,7 +2368,7 @@ int fib_table_dump(struct fib_table *tb, struct sk_buff *skb,
* and key == 0 means the dump has wrapped around and we are done.
*/
if (count && !key)
- return skb->len;
+ return 0;
while ((l = leaf_walk_rcu(&tp, key)) != NULL) {
int err;
@@ -2394,7 +2394,7 @@ int fib_table_dump(struct fib_table *tb, struct sk_buff *skb,
cb->args[3] = key;
cb->args[2] = count;
- return skb->len;
+ return 0;
}
void __init fib_trie_init(void)
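
The synchronize_rcu() to synchronize_net() swap in tnode_free() matters because trie updates run under RTNL. An abridged sketch of what synchronize_net() does (from memory of net/core/dev.c; treat the exact body as an assumption):

    /* Under RTNL the grace period is expedited, shortening the stall
     * tnode_free() takes whenever the freed-tnode budget is exceeded
     * during a locked table update.
     */
    void synchronize_net(void)
    {
            might_sleep();
            if (rtnl_is_locked())
                    synchronize_rcu_expedited();
            else
                    synchronize_rcu();
    }
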
diff --git a/net/ipv4/fou_bpf.c b/net/ipv4/fou_bpf.c
index 4da03bf45c9b..06e5572f296f 100644
--- a/net/ipv4/fou_bpf.c
+++ b/net/ipv4/fou_bpf.c
@@ -100,10 +100,10 @@ __bpf_kfunc int bpf_skb_get_fou_encap(struct __sk_buff *skb_ctx,
__bpf_kfunc_end_defs();
-BTF_SET8_START(fou_kfunc_set)
+BTF_KFUNCS_START(fou_kfunc_set)
BTF_ID_FLAGS(func, bpf_skb_set_fou_encap)
BTF_ID_FLAGS(func, bpf_skb_get_fou_encap)
-BTF_SET8_END(fou_kfunc_set)
+BTF_KFUNCS_END(fou_kfunc_set)
static const struct btf_kfunc_id_set fou_bpf_kfunc_set = {
.owner = THIS_MODULE,
diff --git a/net/ipv4/fou_core.c b/net/ipv4/fou_core.c
index 0c41076e31ed..a8494f796dca 100644
--- a/net/ipv4/fou_core.c
+++ b/net/ipv4/fou_core.c
@@ -351,7 +351,7 @@ static struct sk_buff *gue_gro_receive(struct sock *sk,
optlen = guehdr->hlen << 2;
len += optlen;
- if (skb_gro_header_hard(skb, len)) {
+ if (!skb_gro_may_pull(skb, len)) {
guehdr = skb_gro_header_slow(skb, len, off);
if (unlikely(!guehdr))
goto out;
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
index 311e70bfce40..5028c72d494a 100644
--- a/net/ipv4/gre_offload.c
+++ b/net/ipv4/gre_offload.c
@@ -174,7 +174,7 @@ static struct sk_buff *gre_gro_receive(struct list_head *head,
grehlen += GRE_HEADER_SECTION;
hlen = off + grehlen;
- if (skb_gro_header_hard(skb, hlen)) {
+ if (!skb_gro_may_pull(skb, hlen)) {
greh = skb_gro_header_slow(skb, hlen, off);
if (unlikely(!greh))
goto out;
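
The GRO hunks in fou_core.c and gre_offload.c are a helper rename with inverted polarity; the new predicate reads positively. A sketch of the equivalence, assuming the old helper tested the same frag0 length:

    /* skb_gro_header_hard(skb, hlen)  ~  hlen >  GRO frag0 length
     * skb_gro_may_pull(skb, hlen)     ~  hlen <= GRO frag0 length
     *
     * hence every "if (skb_gro_header_hard(...))" becomes
     * "if (!skb_gro_may_pull(...))" with unchanged behaviour.
     */
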
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index efeeca2b1328..717e97a389a8 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -120,12 +120,12 @@
*/
#define IGMP_V1_SEEN(in_dev) \
- (IPV4_DEVCONF_ALL(dev_net(in_dev->dev), FORCE_IGMP_VERSION) == 1 || \
+ (IPV4_DEVCONF_ALL_RO(dev_net(in_dev->dev), FORCE_IGMP_VERSION) == 1 || \
IN_DEV_CONF_GET((in_dev), FORCE_IGMP_VERSION) == 1 || \
((in_dev)->mr_v1_seen && \
time_before(jiffies, (in_dev)->mr_v1_seen)))
#define IGMP_V2_SEEN(in_dev) \
- (IPV4_DEVCONF_ALL(dev_net(in_dev->dev), FORCE_IGMP_VERSION) == 2 || \
+ (IPV4_DEVCONF_ALL_RO(dev_net(in_dev->dev), FORCE_IGMP_VERSION) == 2 || \
IN_DEV_CONF_GET((in_dev), FORCE_IGMP_VERSION) == 2 || \
((in_dev)->mr_v2_seen && \
time_before(jiffies, (in_dev)->mr_v2_seen)))
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 459af1f89739..7d8090f109ef 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -906,8 +906,9 @@ static struct request_sock *inet_reqsk_clone(struct request_sock *req,
memcpy(nreq_sk, req_sk,
offsetof(struct sock, sk_dontcopy_begin));
- memcpy(&nreq_sk->sk_dontcopy_end, &req_sk->sk_dontcopy_end,
- req->rsk_ops->obj_size - offsetof(struct sock, sk_dontcopy_end));
+ unsafe_memcpy(&nreq_sk->sk_dontcopy_end, &req_sk->sk_dontcopy_end,
+ req->rsk_ops->obj_size - offsetof(struct sock, sk_dontcopy_end),
+ /* alloc is larger than struct, see above */);
sk_node_init(&nreq_sk->sk_node);
nreq_sk->sk_tx_queue_mapping = req_sk->sk_tx_queue_mapping;
@@ -1467,7 +1468,7 @@ static struct dst_entry *inet_csk_rebuild_route(struct sock *sk, struct flowi *f
rt = ip_route_output_ports(sock_net(sk), fl4, sk, daddr,
inet->inet_saddr, inet->inet_dport,
inet->inet_sport, sk->sk_protocol,
- RT_CONN_FLAGS(sk), sk->sk_bound_dev_if);
+ ip_sock_rt_tos(sk), sk->sk_bound_dev_if);
if (IS_ERR(rt))
rt = NULL;
if (rt)
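
unsafe_memcpy() in inet_reqsk_clone() is the FORTIFY_SOURCE escape hatch: same arguments as memcpy() plus a mandatory justification comment, silencing the compile-time field-bounds warning that a copy running past sk_dontcopy_end's member would otherwise raise. The shape of a call, as a sketch:

    unsafe_memcpy(&dst->sk_dontcopy_end, &src->sk_dontcopy_end,
                  obj_size - offsetof(struct sock, sk_dontcopy_end),
                  /* allocation is obj_size bytes, larger than struct sock */);
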
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 8e6b6aa0579e..7adace541fe2 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -32,7 +32,7 @@
#include <linux/inet_diag.h>
#include <linux/sock_diag.h>
-static const struct inet_diag_handler **inet_diag_table;
+static const struct inet_diag_handler __rcu **inet_diag_table;
struct inet_diag_entry {
const __be32 *saddr;
@@ -48,28 +48,28 @@ struct inet_diag_entry {
#endif
};
-static DEFINE_MUTEX(inet_diag_table_mutex);
-
static const struct inet_diag_handler *inet_diag_lock_handler(int proto)
{
- if (proto < 0 || proto >= IPPROTO_MAX) {
- mutex_lock(&inet_diag_table_mutex);
- return ERR_PTR(-ENOENT);
- }
+ const struct inet_diag_handler *handler;
- if (!inet_diag_table[proto])
+ if (proto < 0 || proto >= IPPROTO_MAX)
+ return NULL;
+
+ if (!READ_ONCE(inet_diag_table[proto]))
sock_load_diag_module(AF_INET, proto);
- mutex_lock(&inet_diag_table_mutex);
- if (!inet_diag_table[proto])
- return ERR_PTR(-ENOENT);
+ rcu_read_lock();
+ handler = rcu_dereference(inet_diag_table[proto]);
+ if (handler && !try_module_get(handler->owner))
+ handler = NULL;
+ rcu_read_unlock();
- return inet_diag_table[proto];
+ return handler;
}
static void inet_diag_unlock_handler(const struct inet_diag_handler *handler)
{
- mutex_unlock(&inet_diag_table_mutex);
+ module_put(handler->owner);
}
void inet_diag_msg_common_fill(struct inet_diag_msg *r, struct sock *sk)
@@ -104,9 +104,12 @@ static size_t inet_sk_attr_size(struct sock *sk,
const struct inet_diag_handler *handler;
size_t aux = 0;
- handler = inet_diag_table[req->sdiag_protocol];
+ rcu_read_lock();
+ handler = rcu_dereference(inet_diag_table[req->sdiag_protocol]);
+ DEBUG_NET_WARN_ON_ONCE(!handler);
if (handler && handler->idiag_get_aux_size)
aux = handler->idiag_get_aux_size(sk, net_admin);
+ rcu_read_unlock();
return nla_total_size(sizeof(struct tcp_info))
+ nla_total_size(sizeof(struct inet_diag_msg))
@@ -244,10 +247,16 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
struct nlmsghdr *nlh;
struct nlattr *attr;
void *info = NULL;
+ int protocol;
cb_data = cb->data;
- handler = inet_diag_table[inet_diag_get_protocol(req, cb_data)];
- BUG_ON(!handler);
+ protocol = inet_diag_get_protocol(req, cb_data);
+
+ /* inet_diag_lock_handler() made sure inet_diag_table[] is stable. */
+ handler = rcu_dereference_protected(inet_diag_table[protocol], 1);
+ DEBUG_NET_WARN_ON_ONCE(!handler);
+ if (!handler)
+ return -ENXIO;
nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
cb->nlh->nlmsg_type, sizeof(*r), nlmsg_flags);
@@ -605,9 +614,10 @@ static int inet_diag_cmd_exact(int cmd, struct sk_buff *in_skb,
protocol = inet_diag_get_protocol(req, &dump_data);
handler = inet_diag_lock_handler(protocol);
- if (IS_ERR(handler)) {
- err = PTR_ERR(handler);
- } else if (cmd == SOCK_DIAG_BY_FAMILY) {
+ if (!handler)
+ return -ENOENT;
+
+ if (cmd == SOCK_DIAG_BY_FAMILY) {
struct netlink_callback cb = {
.nlh = nlh,
.skb = in_skb,
@@ -1035,6 +1045,10 @@ void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb,
num = 0;
ilb = &hashinfo->lhash2[i];
+ if (hlist_nulls_empty(&ilb->nulls_head)) {
+ s_num = 0;
+ continue;
+ }
spin_lock(&ilb->lock);
sk_nulls_for_each(sk, node, &ilb->nulls_head) {
struct inet_sock *inet = inet_sk(sk);
@@ -1099,6 +1113,10 @@ resume_bind_walk:
accum = 0;
ibb = &hashinfo->bhash2[i];
+ if (hlist_empty(&ibb->chain)) {
+ s_num = 0;
+ continue;
+ }
spin_lock_bh(&ibb->lock);
inet_bind_bucket_for_each(tb2, &ibb->chain) {
if (!net_eq(ib2_net(tb2), net))
@@ -1259,12 +1277,12 @@ static int __inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
again:
prev_min_dump_alloc = cb->min_dump_alloc;
handler = inet_diag_lock_handler(protocol);
- if (!IS_ERR(handler))
+ if (handler) {
handler->dump(skb, cb, r);
- else
- err = PTR_ERR(handler);
- inet_diag_unlock_handler(handler);
-
+ inet_diag_unlock_handler(handler);
+ } else {
+ err = -ENOENT;
+ }
/* The skb is not large enough to fit one sk info and
* inet_sk_diag_fill() has requested for a larger skb.
*/
@@ -1457,10 +1475,9 @@ int inet_diag_handler_get_info(struct sk_buff *skb, struct sock *sk)
}
handler = inet_diag_lock_handler(sk->sk_protocol);
- if (IS_ERR(handler)) {
- inet_diag_unlock_handler(handler);
+ if (!handler) {
nlmsg_cancel(skb, nlh);
- return PTR_ERR(handler);
+ return -ENOENT;
}
attr = handler->idiag_info_size
@@ -1479,6 +1496,7 @@ int inet_diag_handler_get_info(struct sk_buff *skb, struct sock *sk)
}
static const struct sock_diag_handler inet_diag_handler = {
+ .owner = THIS_MODULE,
.family = AF_INET,
.dump = inet_diag_handler_cmd,
.get_info = inet_diag_handler_get_info,
@@ -1486,6 +1504,7 @@ static const struct sock_diag_handler inet_diag_handler = {
};
static const struct sock_diag_handler inet6_diag_handler = {
+ .owner = THIS_MODULE,
.family = AF_INET6,
.dump = inet_diag_handler_cmd,
.get_info = inet_diag_handler_get_info,
@@ -1495,20 +1514,12 @@ static const struct sock_diag_handler inet6_diag_handler = {
int inet_diag_register(const struct inet_diag_handler *h)
{
const __u16 type = h->idiag_type;
- int err = -EINVAL;
if (type >= IPPROTO_MAX)
- goto out;
+ return -EINVAL;
- mutex_lock(&inet_diag_table_mutex);
- err = -EEXIST;
- if (!inet_diag_table[type]) {
- inet_diag_table[type] = h;
- err = 0;
- }
- mutex_unlock(&inet_diag_table_mutex);
-out:
- return err;
+ return !cmpxchg((const struct inet_diag_handler **)&inet_diag_table[type],
+ NULL, h) ? 0 : -EEXIST;
}
EXPORT_SYMBOL_GPL(inet_diag_register);
@@ -1519,12 +1530,16 @@ void inet_diag_unregister(const struct inet_diag_handler *h)
if (type >= IPPROTO_MAX)
return;
- mutex_lock(&inet_diag_table_mutex);
- inet_diag_table[type] = NULL;
- mutex_unlock(&inet_diag_table_mutex);
+ xchg((const struct inet_diag_handler **)&inet_diag_table[type],
+ NULL);
}
EXPORT_SYMBOL_GPL(inet_diag_unregister);
+static const struct sock_diag_inet_compat inet_diag_compat = {
+ .owner = THIS_MODULE,
+ .fn = inet_diag_rcv_msg_compat,
+};
+
static int __init inet_diag_init(void)
{
const int inet_diag_table_size = (IPPROTO_MAX *
@@ -1543,7 +1558,7 @@ static int __init inet_diag_init(void)
if (err)
goto out_free_inet;
- sock_diag_register_inet_compat(inet_diag_rcv_msg_compat);
+ sock_diag_register_inet_compat(&inet_diag_compat);
out:
return err;
@@ -1558,7 +1573,7 @@ static void __exit inet_diag_exit(void)
{
sock_diag_unregister(&inet6_diag_handler);
sock_diag_unregister(&inet_diag_handler);
- sock_diag_unregister_inet_compat(inet_diag_rcv_msg_compat);
+ sock_diag_unregister_inet_compat(&inet_diag_compat);
kfree(inet_diag_table);
}
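
The inet_diag conversion swaps the global mutex for a lockless registry: handlers are published with cmpxchg(), looked up under rcu_read_lock(), and pinned with a module reference instead of the mutex. A generic sketch of the pattern; struct my_ops, ops_table and MAX_PROTO are illustrative names only:

    #define MAX_PROTO 256

    struct my_ops {
            struct module *owner;
    };

    static const struct my_ops __rcu *ops_table[MAX_PROTO];

    static int ops_register(const struct my_ops *ops, int proto)
    {
            /* cmpxchg() both publishes the pointer and detects a taken slot */
            return !cmpxchg((const struct my_ops **)&ops_table[proto],
                            NULL, ops) ? 0 : -EEXIST;
    }

    static const struct my_ops *ops_get(int proto)
    {
            const struct my_ops *ops;

            rcu_read_lock();
            ops = rcu_dereference(ops_table[proto]);
            if (ops && !try_module_get(ops->owner))
                    ops = NULL;     /* lost the race with module unload */
            rcu_read_unlock();
            return ops;             /* caller drops it with module_put() */
    }
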
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 308ff34002ea..7498af320164 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -24,6 +24,7 @@
#include <net/inet6_hashtables.h>
#endif
#include <net/secure_seq.h>
+#include <net/hotdata.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/sock_reuseport.h>
@@ -32,8 +33,6 @@ u32 inet_ehashfn(const struct net *net, const __be32 laddr,
const __u16 lport, const __be32 faddr,
const __be16 fport)
{
- static u32 inet_ehash_secret __read_mostly;
-
net_get_random_once(&inet_ehash_secret, sizeof(inet_ehash_secret));
return __inet_ehashfn(laddr, lport, faddr, fport,
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index e9fed83e9b3c..5bd759963451 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -81,10 +81,7 @@ void __init inet_initpeers(void)
inet_peer_threshold = clamp_val(nr_entries, 4096, 65536 + 128);
- peer_cachep = kmem_cache_create("inet_peer_cache",
- sizeof(struct inet_peer),
- 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
- NULL);
+ peer_cachep = KMEM_CACHE(inet_peer, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
}
/* Called with rcu_read_lock() or base->lock held */
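
The KMEM_CACHE() conversion is behaviour-preserving because the macro derives the cache name, object size and alignment from the struct itself, so the three can never drift apart (abridged from include/linux/slab.h):

    #define KMEM_CACHE(__struct, __flags)                                 \
            kmem_cache_create(#__struct, sizeof(struct __struct),         \
                              __alignof__(struct __struct), (__flags), NULL)
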
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 6b9cf5a24c19..7b16c211b904 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -1025,14 +1025,16 @@ static int __net_init ipgre_init_net(struct net *net)
return ip_tunnel_init_net(net, ipgre_net_id, &ipgre_link_ops, NULL);
}
-static void __net_exit ipgre_exit_batch_net(struct list_head *list_net)
+static void __net_exit ipgre_exit_batch_rtnl(struct list_head *list_net,
+ struct list_head *dev_to_kill)
{
- ip_tunnel_delete_nets(list_net, ipgre_net_id, &ipgre_link_ops);
+ ip_tunnel_delete_nets(list_net, ipgre_net_id, &ipgre_link_ops,
+ dev_to_kill);
}
static struct pernet_operations ipgre_net_ops = {
.init = ipgre_init_net,
- .exit_batch = ipgre_exit_batch_net,
+ .exit_batch_rtnl = ipgre_exit_batch_rtnl,
.id = &ipgre_net_id,
.size = sizeof(struct ip_tunnel_net),
};
@@ -1697,14 +1699,16 @@ static int __net_init ipgre_tap_init_net(struct net *net)
return ip_tunnel_init_net(net, gre_tap_net_id, &ipgre_tap_ops, "gretap0");
}
-static void __net_exit ipgre_tap_exit_batch_net(struct list_head *list_net)
+static void __net_exit ipgre_tap_exit_batch_rtnl(struct list_head *list_net,
+ struct list_head *dev_to_kill)
{
- ip_tunnel_delete_nets(list_net, gre_tap_net_id, &ipgre_tap_ops);
+ ip_tunnel_delete_nets(list_net, gre_tap_net_id, &ipgre_tap_ops,
+ dev_to_kill);
}
static struct pernet_operations ipgre_tap_net_ops = {
.init = ipgre_tap_init_net,
- .exit_batch = ipgre_tap_exit_batch_net,
+ .exit_batch_rtnl = ipgre_tap_exit_batch_rtnl,
.id = &gre_tap_net_id,
.size = sizeof(struct ip_tunnel_net),
};
@@ -1715,14 +1719,16 @@ static int __net_init erspan_init_net(struct net *net)
&erspan_link_ops, "erspan0");
}
-static void __net_exit erspan_exit_batch_net(struct list_head *net_list)
+static void __net_exit erspan_exit_batch_rtnl(struct list_head *net_list,
+ struct list_head *dev_to_kill)
{
- ip_tunnel_delete_nets(net_list, erspan_net_id, &erspan_link_ops);
+ ip_tunnel_delete_nets(net_list, erspan_net_id, &erspan_link_ops,
+ dev_to_kill);
}
static struct pernet_operations erspan_net_ops = {
.init = erspan_init_net,
- .exit_batch = erspan_exit_batch_net,
+ .exit_batch_rtnl = erspan_exit_batch_rtnl,
.id = &erspan_net_id,
.size = sizeof(struct ip_tunnel_net),
};
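
The .exit_batch to .exit_batch_rtnl conversions here and in ip_vti.c/ipip.c below rely on the core taking RTNL once per netns batch: each handler runs with the lock already held and only queues its dying devices on dev_to_kill; the core then calls unregister_netdevice_many() once and drops the lock. A sketch of the hook's shape, with example_queue_devs() as a hypothetical stand-in:

    static void __net_exit example_exit_batch_rtnl(struct list_head *net_list,
                                                   struct list_head *dev_to_kill)
    {
            struct net *net;

            ASSERT_RTNL();  /* the pernet core already holds the lock */
            list_for_each_entry(net, net_list, exit_list)
                    example_queue_devs(net, dev_to_kill);   /* hypothetical */
    }
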
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 67d846622365..33f93dc730a3 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -493,7 +493,7 @@ int __ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
inet->inet_dport,
inet->inet_sport,
sk->sk_protocol,
- RT_CONN_FLAGS_TOS(sk, tos),
+ RT_TOS(tos),
sk->sk_bound_dev_if);
if (IS_ERR(rt))
goto no_route;
@@ -1458,6 +1458,7 @@ struct sk_buff *__ip_make_skb(struct sock *sk,
skb->priority = (cork->tos != -1) ? cork->priority: READ_ONCE(sk->sk_priority);
skb->mark = cork->mark;
skb->tstamp = cork->transmit_time;
+ skb->mono_delivery_time = !!skb->tstamp;
/*
* Steal rt from cork.dst to avoid a pair of atomic_inc/atomic_dec
* on dst refcount
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 21d2ffa919e9..cf377377b52d 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -894,7 +894,7 @@ int do_ip_setsockopt(struct sock *sk, int level, int optname,
{
struct inet_sock *inet = inet_sk(sk);
struct net *net = sock_net(sk);
- int val = 0, err;
+ int val = 0, err, retv;
bool needs_rtnl = setsockopt_needs_rtnl(optname);
switch (optname) {
@@ -938,8 +938,12 @@ int do_ip_setsockopt(struct sock *sk, int level, int optname,
/* If optlen==0, it is equivalent to val == 0 */
- if (optname == IP_ROUTER_ALERT)
- return ip_ra_control(sk, val ? 1 : 0, NULL);
+ if (optname == IP_ROUTER_ALERT) {
+ retv = ip_ra_control(sk, val ? 1 : 0, NULL);
+ if (retv == 0)
+ inet_assign_bit(RTALERT, sk, val);
+ return retv;
+ }
if (ip_mroute_opt(optname))
return ip_mroute_setsockopt(sk, optname, optval, optlen);
@@ -1575,6 +1579,9 @@ int do_ip_getsockopt(struct sock *sk, int level, int optname,
case IP_BIND_ADDRESS_NO_PORT:
val = inet_test_bit(BIND_ADDRESS_NO_PORT, sk);
goto copyval;
+ case IP_ROUTER_ALERT:
+ val = inet_test_bit(RTALERT, sk);
+ goto copyval;
case IP_TTL:
val = READ_ONCE(inet->uc_ttl);
if (val < 0)
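
With the RTALERT socket bit tracked on set, IP_ROUTER_ALERT becomes readable, where getsockopt() previously had no way to report it. A small userspace usage sketch:

    #include <netinet/in.h>
    #include <sys/socket.h>

    int router_alert_enabled(int fd)
    {
            int val = 0;
            socklen_t len = sizeof(val);

            if (getsockopt(fd, IPPROTO_IP, IP_ROUTER_ALERT, &val, &len) < 0)
                    return -1;      /* older kernels fail here */
            return val;             /* 1 if IP_ROUTER_ALERT was set */
    }
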
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 1b6981de3f29..1b8d8ff9a237 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -102,10 +102,9 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
if (!ip_tunnel_key_match(&t->parms, flags, key))
continue;
- if (t->parms.link == link)
+ if (READ_ONCE(t->parms.link) == link)
return t;
- else
- cand = t;
+ cand = t;
}
hlist_for_each_entry_rcu(t, head, hash_node) {
@@ -117,9 +116,9 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
if (!ip_tunnel_key_match(&t->parms, flags, key))
continue;
- if (t->parms.link == link)
+ if (READ_ONCE(t->parms.link) == link)
return t;
- else if (!cand)
+ if (!cand)
cand = t;
}
@@ -137,9 +136,9 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
if (!ip_tunnel_key_match(&t->parms, flags, key))
continue;
- if (t->parms.link == link)
+ if (READ_ONCE(t->parms.link) == link)
return t;
- else if (!cand)
+ if (!cand)
cand = t;
}
@@ -150,9 +149,9 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
!(t->dev->flags & IFF_UP))
continue;
- if (t->parms.link == link)
+ if (READ_ONCE(t->parms.link) == link)
return t;
- else if (!cand)
+ if (!cand)
cand = t;
}
@@ -221,7 +220,7 @@ static struct ip_tunnel *ip_tunnel_find(struct ip_tunnel_net *itn,
hlist_for_each_entry_rcu(t, head, hash_node) {
if (local == t->parms.iph.saddr &&
remote == t->parms.iph.daddr &&
- link == t->parms.link &&
+ link == READ_ONCE(t->parms.link) &&
type == t->dev->type &&
ip_tunnel_key_match(&t->parms, flags, key))
break;
@@ -378,7 +377,7 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
bool log_ecn_error)
{
const struct iphdr *iph = ip_hdr(skb);
- int err;
+ int nh, err;
#ifdef CONFIG_NET_IPGRE_BROADCAST
if (ipv4_is_multicast(iph->daddr)) {
@@ -404,8 +403,21 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
tunnel->i_seqno = ntohl(tpi->seq) + 1;
}
+ /* Save offset of outer header relative to skb->head,
+ * because we are going to reset the network header to the inner header
+ * and might change skb->head.
+ */
+ nh = skb_network_header(skb) - skb->head;
+
skb_set_network_header(skb, (tunnel->dev->type == ARPHRD_ETHER) ? ETH_HLEN : 0);
+ if (!pskb_inet_may_pull(skb)) {
+ DEV_STATS_INC(tunnel->dev, rx_length_errors);
+ DEV_STATS_INC(tunnel->dev, rx_errors);
+ goto drop;
+ }
+ iph = (struct iphdr *)(skb->head + nh);
+
err = IP_ECN_decapsulate(iph, skb);
if (unlikely(err)) {
if (log_ecn_error)
@@ -761,7 +773,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
ip_tunnel_init_flow(&fl4, protocol, dst, tnl_params->saddr,
tunnel->parms.o_key, RT_TOS(tos),
- dev_net(dev), tunnel->parms.link,
+ dev_net(dev), READ_ONCE(tunnel->parms.link),
tunnel->fwmark, skb_get_hash(skb), 0);
if (ip_tunnel_encap(skb, &tunnel->encap, &protocol, &fl4) < 0)
@@ -881,7 +893,7 @@ static void ip_tunnel_update(struct ip_tunnel_net *itn,
if (t->parms.link != p->link || t->fwmark != fwmark) {
int mtu;
- t->parms.link = p->link;
+ WRITE_ONCE(t->parms.link, p->link);
t->fwmark = fwmark;
mtu = ip_tunnel_bind_dev(dev);
if (set_mtu)
@@ -1071,9 +1083,9 @@ EXPORT_SYMBOL(ip_tunnel_get_link_net);
int ip_tunnel_get_iflink(const struct net_device *dev)
{
- struct ip_tunnel *tunnel = netdev_priv(dev);
+ const struct ip_tunnel *tunnel = netdev_priv(dev);
- return tunnel->parms.link;
+ return READ_ONCE(tunnel->parms.link);
}
EXPORT_SYMBOL(ip_tunnel_get_iflink);
@@ -1144,19 +1156,17 @@ static void ip_tunnel_destroy(struct net *net, struct ip_tunnel_net *itn,
}
void ip_tunnel_delete_nets(struct list_head *net_list, unsigned int id,
- struct rtnl_link_ops *ops)
+ struct rtnl_link_ops *ops,
+ struct list_head *dev_to_kill)
{
struct ip_tunnel_net *itn;
struct net *net;
- LIST_HEAD(list);
- rtnl_lock();
+ ASSERT_RTNL();
list_for_each_entry(net, net_list, exit_list) {
itn = net_generic(net, id);
- ip_tunnel_destroy(net, itn, &list, ops);
+ ip_tunnel_destroy(net, itn, dev_to_kill, ops);
}
- unregister_netdevice_many(&list);
- rtnl_unlock();
}
EXPORT_SYMBOL_GPL(ip_tunnel_delete_nets);
@@ -1285,6 +1295,7 @@ int ip_tunnel_init(struct net_device *dev)
if (tunnel->collect_md)
netif_keep_dst(dev);
+ netdev_lockdep_set_classes(dev);
return 0;
}
EXPORT_SYMBOL_GPL(ip_tunnel_init);
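
tunnel->parms.link is written under RTNL but read from the RCU lookup and xmit paths, so the plain accesses were data races under KCSAN; the hunks pair WRITE_ONCE() with READ_ONCE() to rule out load/store tearing. (The ip_tunnel_rcv() hunk is a separate fix: pskb_inet_may_pull() may reallocate skb->head, so the outer iph pointer is re-derived from a saved offset.) The annotation pattern in isolation:

    /* writer, RTNL held */
    WRITE_ONCE(t->parms.link, p->link);

    /* lockless reader, RCU datapath */
    if (READ_ONCE(t->parms.link) == link)
            return t;
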
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index d1d6bb28ed6e..ee587adb169f 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -510,14 +510,16 @@ static int __net_init vti_init_net(struct net *net)
return 0;
}
-static void __net_exit vti_exit_batch_net(struct list_head *list_net)
+static void __net_exit vti_exit_batch_rtnl(struct list_head *list_net,
+ struct list_head *dev_to_kill)
{
- ip_tunnel_delete_nets(list_net, vti_net_id, &vti_link_ops);
+ ip_tunnel_delete_nets(list_net, vti_net_id, &vti_link_ops,
+ dev_to_kill);
}
static struct pernet_operations vti_net_ops = {
.init = vti_init_net,
- .exit_batch = vti_exit_batch_net,
+ .exit_batch_rtnl = vti_exit_batch_rtnl,
.id = &vti_net_id,
.size = sizeof(struct ip_tunnel_net),
};
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 03afa3871efc..f2696eaadbe6 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -592,14 +592,16 @@ static int __net_init ipip_init_net(struct net *net)
return ip_tunnel_init_net(net, ipip_net_id, &ipip_link_ops, "tunl0");
}
-static void __net_exit ipip_exit_batch_net(struct list_head *list_net)
+static void __net_exit ipip_exit_batch_rtnl(struct list_head *list_net,
+ struct list_head *dev_to_kill)
{
- ip_tunnel_delete_nets(list_net, ipip_net_id, &ipip_link_ops);
+ ip_tunnel_delete_nets(list_net, ipip_net_id, &ipip_link_ops,
+ dev_to_kill);
}
static struct pernet_operations ipip_net_ops = {
.init = ipip_init_net,
- .exit_batch = ipip_exit_batch_net,
+ .exit_batch_rtnl = ipip_exit_batch_rtnl,
.id = &ipip_net_id,
.size = sizeof(struct ip_tunnel_net),
};
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 362229836510..fd5c01c8489f 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -1603,9 +1603,11 @@ int ip_mroute_getsockopt(struct sock *sk, int optname, sockptr_t optval,
if (copy_from_sockptr(&olr, optlen, sizeof(int)))
return -EFAULT;
- olr = min_t(unsigned int, olr, sizeof(int));
if (olr < 0)
return -EINVAL;
+
+ olr = min_t(unsigned int, olr, sizeof(int));
+
if (copy_to_sockptr(optlen, &olr, sizeof(int)))
return -EFAULT;
if (copy_to_sockptr(optval, &val, olr))
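
The reorder in ip_mroute_getsockopt() fixes a sign check that could never fire: min_t(unsigned int, ...) compares both operands as unsigned, so a negative optlen had already been clamped before the < 0 test ran. Traced through with olr = -1:

    int olr = -1;
    olr = min_t(unsigned int, olr, sizeof(int)); /* (u32)-1 > 4, so olr = 4 */
    if (olr < 0)                                 /* dead code in the old order */
            return -EINVAL;
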
@@ -2587,7 +2589,9 @@ errout_free:
static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
{
- struct fib_dump_filter filter = {};
+ struct fib_dump_filter filter = {
+ .rtnl_held = true,
+ };
int err;
if (cb->strict_check) {
@@ -3139,10 +3143,7 @@ int __init ip_mr_init(void)
{
int err;
- mrt_cachep = kmem_cache_create("ip_mrt_cache",
- sizeof(struct mfc_cache),
- 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
- NULL);
+ mrt_cachep = KMEM_CACHE(mfc_cache, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
err = register_pernet_subsys(&ipmr_net_ops);
if (err)
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index f71a7e9a7de6..8f6e950163a7 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -10,6 +10,10 @@ config NF_DEFRAG_IPV4
tristate
default n
+# old sockopt interface and eval loop
+config IP_NF_IPTABLES_LEGACY
+ tristate
+
config NF_SOCKET_IPV4
tristate "IPv4 socket lookup support"
help
@@ -152,7 +156,7 @@ config IP_NF_MATCH_ECN
config IP_NF_MATCH_RPFILTER
tristate '"rpfilter" reverse path filter match support'
depends on NETFILTER_ADVANCED
- depends on IP_NF_MANGLE || IP_NF_RAW
+ depends on IP_NF_MANGLE || IP_NF_RAW || NFT_COMPAT
help
This option allows you to match packets whose replies would
go out via the interface the packet came in.
@@ -173,6 +177,7 @@ config IP_NF_MATCH_TTL
config IP_NF_FILTER
tristate "Packet filtering"
default m if NETFILTER_ADVANCED=n
+ select IP_NF_IPTABLES_LEGACY
help
Packet filtering defines a table `filter', which has a series of
rules for simple packet filtering at local input, forwarding and
@@ -182,7 +187,7 @@ config IP_NF_FILTER
config IP_NF_TARGET_REJECT
tristate "REJECT target support"
- depends on IP_NF_FILTER
+ depends on IP_NF_FILTER || NFT_COMPAT
select NF_REJECT_IPV4
default m if NETFILTER_ADVANCED=n
help
@@ -212,6 +217,7 @@ config IP_NF_NAT
default m if NETFILTER_ADVANCED=n
select NF_NAT
select NETFILTER_XT_NAT
+ select IP_NF_IPTABLES_LEGACY
help
This enables the `nat' table in iptables. This allows masquerading,
port forwarding and other forms of full Network Address Port
@@ -252,6 +258,7 @@ endif # IP_NF_NAT
config IP_NF_MANGLE
tristate "Packet mangling"
default m if NETFILTER_ADVANCED=n
+ select IP_NF_IPTABLES_LEGACY
help
This option adds a `mangle' table to iptables: see the man page for
iptables(8). This table is used for various packet alterations
@@ -261,7 +268,7 @@ config IP_NF_MANGLE
config IP_NF_TARGET_ECN
tristate "ECN target support"
- depends on IP_NF_MANGLE
+ depends on IP_NF_MANGLE || NFT_COMPAT
depends on NETFILTER_ADVANCED
help
This option adds a `ECN' target, which can be used in the iptables mangle
@@ -286,6 +293,7 @@ config IP_NF_TARGET_TTL
# raw + specific targets
config IP_NF_RAW
tristate 'raw table support (required for NOTRACK/TRACE)'
+ select IP_NF_IPTABLES_LEGACY
help
This option adds a `raw' table to iptables. This table is the very
first in the netfilter framework and hooks in at the PREROUTING
@@ -299,6 +307,7 @@ config IP_NF_SECURITY
tristate "Security table"
depends on SECURITY
depends on NETFILTER_ADVANCED
+ select IP_NF_IPTABLES_LEGACY
help
This option adds a `security' table to iptables, for use
with Mandatory Access Control (MAC) policy.
@@ -309,36 +318,35 @@ endif # IP_NF_IPTABLES
# ARP tables
config IP_NF_ARPTABLES
- tristate "ARP tables support"
- select NETFILTER_XTABLES
- select NETFILTER_FAMILY_ARP
- depends on NETFILTER_ADVANCED
- help
- arptables is a general, extensible packet identification framework.
- The ARP packet filtering and mangling (manipulation)subsystems
- use this: say Y or M here if you want to use either of those.
-
- To compile it as a module, choose M here. If unsure, say N.
+ tristate
-if IP_NF_ARPTABLES
+config NFT_COMPAT_ARP
+ tristate
+ depends on NF_TABLES_ARP && NFT_COMPAT
+ default m if NFT_COMPAT=m
+ default y if NFT_COMPAT=y
config IP_NF_ARPFILTER
- tristate "ARP packet filtering"
+ tristate "arptables-legacy packet filtering support"
+ select IP_NF_ARPTABLES
+ depends on NETFILTER_XTABLES
help
ARP packet filtering defines a table `filter', which has a series of
rules for simple ARP packet filtering at local input and
- local output. On a bridge, you can also specify filtering rules
- for forwarded ARP packets. See the man page for arptables(8).
+ local output. This is only needed for arptables-legacy(8).
+ Neither arptables-nft nor nftables need this to work.
To compile it as a module, choose M here. If unsure, say N.
config IP_NF_ARP_MANGLE
tristate "ARP payload mangling"
+ depends on IP_NF_ARPTABLES || NFT_COMPAT_ARP
help
Allows altering the ARP packet payload: source and destination
hardware and network addresses.
-endif # IP_NF_ARPTABLES
+ This option is needed by both arptables-legacy and arptables-nft.
+ It is not used by nftables.
endmenu
diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile
index 5a26f9de1ab9..85502d4dfbb4 100644
--- a/net/ipv4/netfilter/Makefile
+++ b/net/ipv4/netfilter/Makefile
@@ -25,7 +25,7 @@ obj-$(CONFIG_NFT_FIB_IPV4) += nft_fib_ipv4.o
obj-$(CONFIG_NFT_DUP_IPV4) += nft_dup_ipv4.o
# generic IP tables
-obj-$(CONFIG_IP_NF_IPTABLES) += ip_tables.o
+obj-$(CONFIG_IP_NF_IPTABLES_LEGACY) += ip_tables.o
# the three instances of ip_tables
obj-$(CONFIG_IP_NF_FILTER) += iptable_filter.o
diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c
index bbff68b5b5d4..74928a9d1aa4 100644
--- a/net/ipv4/nexthop.c
+++ b/net/ipv4/nexthop.c
@@ -26,6 +26,9 @@ static void remove_nexthop(struct net *net, struct nexthop *nh,
#define NH_DEV_HASHBITS 8
#define NH_DEV_HASHSIZE (1U << NH_DEV_HASHBITS)
+#define NHA_OP_FLAGS_DUMP_ALL (NHA_OP_FLAG_DUMP_STATS | \
+ NHA_OP_FLAG_DUMP_HW_STATS)
+
static const struct nla_policy rtm_nh_policy_new[] = {
[NHA_ID] = { .type = NLA_U32 },
[NHA_GROUP] = { .type = NLA_BINARY },
@@ -37,10 +40,17 @@ static const struct nla_policy rtm_nh_policy_new[] = {
[NHA_ENCAP] = { .type = NLA_NESTED },
[NHA_FDB] = { .type = NLA_FLAG },
[NHA_RES_GROUP] = { .type = NLA_NESTED },
+ [NHA_HW_STATS_ENABLE] = NLA_POLICY_MAX(NLA_U32, true),
};
static const struct nla_policy rtm_nh_policy_get[] = {
[NHA_ID] = { .type = NLA_U32 },
+ [NHA_OP_FLAGS] = NLA_POLICY_MASK(NLA_U32,
+ NHA_OP_FLAGS_DUMP_ALL),
+};
+
+static const struct nla_policy rtm_nh_policy_del[] = {
+ [NHA_ID] = { .type = NLA_U32 },
};
static const struct nla_policy rtm_nh_policy_dump[] = {
@@ -48,6 +58,8 @@ static const struct nla_policy rtm_nh_policy_dump[] = {
[NHA_GROUPS] = { .type = NLA_FLAG },
[NHA_MASTER] = { .type = NLA_U32 },
[NHA_FDB] = { .type = NLA_FLAG },
+ [NHA_OP_FLAGS] = NLA_POLICY_MASK(NLA_U32,
+ NHA_OP_FLAGS_DUMP_ALL),
};
static const struct nla_policy rtm_nh_res_policy_new[] = {
@@ -92,6 +104,7 @@ __nh_notifier_single_info_init(struct nh_notifier_single_info *nh_info,
else if (nh_info->gw_family == AF_INET6)
nh_info->ipv6 = nhi->fib_nhc.nhc_gw.ipv6;
+ nh_info->id = nhi->nh_parent->id;
nh_info->is_reject = nhi->reject_nh;
nh_info->is_fdb = nhi->fdb_nh;
nh_info->has_encap = !!nhi->fib_nhc.nhc_lwtstate;
@@ -131,13 +144,13 @@ static int nh_notifier_mpath_info_init(struct nh_notifier_info *info,
info->nh_grp->num_nh = num_nh;
info->nh_grp->is_fdb = nhg->fdb_nh;
+ info->nh_grp->hw_stats = nhg->hw_stats;
for (i = 0; i < num_nh; i++) {
struct nh_grp_entry *nhge = &nhg->nh_entries[i];
struct nh_info *nhi;
nhi = rtnl_dereference(nhge->nh->nh_info);
- info->nh_grp->nh_entries[i].id = nhge->nh->id;
info->nh_grp->nh_entries[i].weight = nhge->weight;
__nh_notifier_single_info_init(&info->nh_grp->nh_entries[i].nh,
nhi);
@@ -162,6 +175,7 @@ static int nh_notifier_res_table_info_init(struct nh_notifier_info *info,
return -ENOMEM;
info->nh_res_table->num_nh_buckets = num_nh_buckets;
+ info->nh_res_table->hw_stats = nhg->hw_stats;
for (i = 0; i < num_nh_buckets; i++) {
struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
@@ -393,6 +407,7 @@ static int call_nexthop_res_table_notifiers(struct net *net, struct nexthop *nh,
struct nh_notifier_info info = {
.net = net,
.extack = extack,
+ .id = nh->id,
};
struct nh_group *nhg;
int err;
@@ -474,6 +489,7 @@ static void nexthop_free_group(struct nexthop *nh)
struct nh_grp_entry *nhge = &nhg->nh_entries[i];
WARN_ON(!list_empty(&nhge->nh_list));
+ free_percpu(nhge->stats);
nexthop_put(nhge->nh);
}
@@ -654,8 +670,202 @@ nla_put_failure:
return -EMSGSIZE;
}
-static int nla_put_nh_group(struct sk_buff *skb, struct nh_group *nhg)
+static void nh_grp_entry_stats_inc(struct nh_grp_entry *nhge)
+{
+ struct nh_grp_entry_stats *cpu_stats;
+
+ cpu_stats = get_cpu_ptr(nhge->stats);
+ u64_stats_update_begin(&cpu_stats->syncp);
+ u64_stats_inc(&cpu_stats->packets);
+ u64_stats_update_end(&cpu_stats->syncp);
+ put_cpu_ptr(cpu_stats);
+}
+
+static void nh_grp_entry_stats_read(struct nh_grp_entry *nhge,
+ u64 *ret_packets)
{
+ int i;
+
+ *ret_packets = 0;
+
+ for_each_possible_cpu(i) {
+ struct nh_grp_entry_stats *cpu_stats;
+ unsigned int start;
+ u64 packets;
+
+ cpu_stats = per_cpu_ptr(nhge->stats, i);
+ do {
+ start = u64_stats_fetch_begin(&cpu_stats->syncp);
+ packets = u64_stats_read(&cpu_stats->packets);
+ } while (u64_stats_fetch_retry(&cpu_stats->syncp, start));
+
+ *ret_packets += packets;
+ }
+}
+
+static int nh_notifier_grp_hw_stats_init(struct nh_notifier_info *info,
+ const struct nexthop *nh)
+{
+ struct nh_group *nhg;
+ int i;
+
+ ASSERT_RTNL();
+ nhg = rtnl_dereference(nh->nh_grp);
+
+ info->id = nh->id;
+ info->type = NH_NOTIFIER_INFO_TYPE_GRP_HW_STATS;
+ info->nh_grp_hw_stats = kzalloc(struct_size(info->nh_grp_hw_stats,
+ stats, nhg->num_nh),
+ GFP_KERNEL);
+ if (!info->nh_grp_hw_stats)
+ return -ENOMEM;
+
+ info->nh_grp_hw_stats->num_nh = nhg->num_nh;
+ for (i = 0; i < nhg->num_nh; i++) {
+ struct nh_grp_entry *nhge = &nhg->nh_entries[i];
+
+ info->nh_grp_hw_stats->stats[i].id = nhge->nh->id;
+ }
+
+ return 0;
+}
+
+static void nh_notifier_grp_hw_stats_fini(struct nh_notifier_info *info)
+{
+ kfree(info->nh_grp_hw_stats);
+}
+
+void nh_grp_hw_stats_report_delta(struct nh_notifier_grp_hw_stats_info *info,
+ unsigned int nh_idx,
+ u64 delta_packets)
+{
+ info->hw_stats_used = true;
+ info->stats[nh_idx].packets += delta_packets;
+}
+EXPORT_SYMBOL(nh_grp_hw_stats_report_delta);
+
+static void nh_grp_hw_stats_apply_update(struct nexthop *nh,
+ struct nh_notifier_info *info)
+{
+ struct nh_group *nhg;
+ int i;
+
+ ASSERT_RTNL();
+ nhg = rtnl_dereference(nh->nh_grp);
+
+ for (i = 0; i < nhg->num_nh; i++) {
+ struct nh_grp_entry *nhge = &nhg->nh_entries[i];
+
+ nhge->packets_hw += info->nh_grp_hw_stats->stats[i].packets;
+ }
+}
+
+static int nh_grp_hw_stats_update(struct nexthop *nh, bool *hw_stats_used)
+{
+ struct nh_notifier_info info = {
+ .net = nh->net,
+ };
+ struct net *net = nh->net;
+ int err;
+
+ if (nexthop_notifiers_is_empty(net))
+ return 0;
+
+ err = nh_notifier_grp_hw_stats_init(&info, nh);
+ if (err)
+ return err;
+
+ err = blocking_notifier_call_chain(&net->nexthop.notifier_chain,
+ NEXTHOP_EVENT_HW_STATS_REPORT_DELTA,
+ &info);
+
+ /* Cache whatever we got, even if there was an error, otherwise the
+ * successful stats retrievals would get lost.
+ */
+ nh_grp_hw_stats_apply_update(nh, &info);
+ *hw_stats_used = info.nh_grp_hw_stats->hw_stats_used;
+
+ nh_notifier_grp_hw_stats_fini(&info);
+ return notifier_to_errno(err);
+}
+
+static int nla_put_nh_group_stats_entry(struct sk_buff *skb,
+ struct nh_grp_entry *nhge,
+ u32 op_flags)
+{
+ struct nlattr *nest;
+ u64 packets;
+
+ nh_grp_entry_stats_read(nhge, &packets);
+
+ nest = nla_nest_start(skb, NHA_GROUP_STATS_ENTRY);
+ if (!nest)
+ return -EMSGSIZE;
+
+ if (nla_put_u32(skb, NHA_GROUP_STATS_ENTRY_ID, nhge->nh->id) ||
+ nla_put_uint(skb, NHA_GROUP_STATS_ENTRY_PACKETS,
+ packets + nhge->packets_hw))
+ goto nla_put_failure;
+
+ if (op_flags & NHA_OP_FLAG_DUMP_HW_STATS &&
+ nla_put_uint(skb, NHA_GROUP_STATS_ENTRY_PACKETS_HW,
+ nhge->packets_hw))
+ goto nla_put_failure;
+
+ nla_nest_end(skb, nest);
+ return 0;
+
+nla_put_failure:
+ nla_nest_cancel(skb, nest);
+ return -EMSGSIZE;
+}
+
+static int nla_put_nh_group_stats(struct sk_buff *skb, struct nexthop *nh,
+ u32 op_flags)
+{
+ struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
+ struct nlattr *nest;
+ bool hw_stats_used;
+ int err;
+ int i;
+
+ if (nla_put_u32(skb, NHA_HW_STATS_ENABLE, nhg->hw_stats))
+ goto err_out;
+
+ if (op_flags & NHA_OP_FLAG_DUMP_HW_STATS &&
+ nhg->hw_stats) {
+ err = nh_grp_hw_stats_update(nh, &hw_stats_used);
+ if (err)
+ goto out;
+
+ if (nla_put_u32(skb, NHA_HW_STATS_USED, hw_stats_used))
+ goto err_out;
+ }
+
+ nest = nla_nest_start(skb, NHA_GROUP_STATS);
+ if (!nest)
+ goto err_out;
+
+ for (i = 0; i < nhg->num_nh; i++)
+ if (nla_put_nh_group_stats_entry(skb, &nhg->nh_entries[i],
+ op_flags))
+ goto cancel_out;
+
+ nla_nest_end(skb, nest);
+ return 0;
+
+cancel_out:
+ nla_nest_cancel(skb, nest);
+err_out:
+ err = -EMSGSIZE;
+out:
+ return err;
+}
+
+static int nla_put_nh_group(struct sk_buff *skb, struct nexthop *nh,
+ u32 op_flags)
+{
+ struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
struct nexthop_grp *p;
size_t len = nhg->num_nh * sizeof(*p);
struct nlattr *nla;
@@ -684,6 +894,11 @@ static int nla_put_nh_group(struct sk_buff *skb, struct nh_group *nhg)
if (nhg->resilient && nla_put_nh_group_res(skb, nhg))
goto nla_put_failure;
+ if (op_flags & NHA_OP_FLAG_DUMP_STATS &&
+ (nla_put_u32(skb, NHA_HW_STATS_ENABLE, nhg->hw_stats) ||
+ nla_put_nh_group_stats(skb, nh, op_flags)))
+ goto nla_put_failure;
+
return 0;
nla_put_failure:
@@ -691,7 +906,8 @@ nla_put_failure:
}
static int nh_fill_node(struct sk_buff *skb, struct nexthop *nh,
- int event, u32 portid, u32 seq, unsigned int nlflags)
+ int event, u32 portid, u32 seq, unsigned int nlflags,
+ u32 op_flags)
{
struct fib6_nh *fib6_nh;
struct fib_nh *fib_nh;
@@ -718,7 +934,7 @@ static int nh_fill_node(struct sk_buff *skb, struct nexthop *nh,
if (nhg->fdb_nh && nla_put_flag(skb, NHA_FDB))
goto nla_put_failure;
- if (nla_put_nh_group(skb, nhg))
+ if (nla_put_nh_group(skb, nh, op_flags))
goto nla_put_failure;
goto out;
}
@@ -849,7 +1065,7 @@ static void nexthop_notify(int event, struct nexthop *nh, struct nl_info *info)
if (!skb)
goto errout;
- err = nh_fill_node(skb, nh, event, info->portid, seq, nlflags);
+ err = nh_fill_node(skb, nh, event, info->portid, seq, nlflags, 0);
if (err < 0) {
/* -EMSGSIZE implies BUG in nh_nlmsg_size() */
WARN_ON(err == -EMSGSIZE);
@@ -1104,6 +1320,7 @@ static int nh_check_attr_group(struct net *net,
if (!tb[i])
continue;
switch (i) {
+ case NHA_HW_STATS_ENABLE:
case NHA_FDB:
continue;
case NHA_RES_GROUP:
@@ -1176,6 +1393,7 @@ static struct nexthop *nexthop_select_path_fdb(struct nh_group *nhg, int hash)
if (hash > atomic_read(&nhge->hthr.upper_bound))
continue;
+ nh_grp_entry_stats_inc(nhge);
return nhge->nh;
}
@@ -1185,7 +1403,7 @@ static struct nexthop *nexthop_select_path_fdb(struct nh_group *nhg, int hash)
static struct nexthop *nexthop_select_path_hthr(struct nh_group *nhg, int hash)
{
- struct nexthop *rc = NULL;
+ struct nh_grp_entry *nhge0 = NULL;
int i;
if (nhg->fdb_nh)
@@ -1200,16 +1418,20 @@ static struct nexthop *nexthop_select_path_hthr(struct nh_group *nhg, int hash)
if (!nexthop_is_good_nh(nhge->nh))
continue;
- if (!rc)
- rc = nhge->nh;
+ if (!nhge0)
+ nhge0 = nhge;
if (hash > atomic_read(&nhge->hthr.upper_bound))
continue;
+ nh_grp_entry_stats_inc(nhge);
return nhge->nh;
}
- return rc ? : nhg->nh_entries[0].nh;
+ if (!nhge0)
+ nhge0 = &nhg->nh_entries[0];
+ nh_grp_entry_stats_inc(nhge0);
+ return nhge0->nh;
}
static struct nexthop *nexthop_select_path_res(struct nh_group *nhg, int hash)
@@ -1225,6 +1447,7 @@ static struct nexthop *nexthop_select_path_res(struct nh_group *nhg, int hash)
bucket = &res_table->nh_buckets[bucket_index];
nh_res_bucket_set_busy(bucket);
nhge = rcu_dereference(bucket->nh_entry);
+ nh_grp_entry_stats_inc(nhge);
return nhge->nh;
}
@@ -1798,6 +2021,7 @@ static void remove_nh_grp_entry(struct net *net, struct nh_grp_entry *nhge,
newg->has_v4 = true;
list_del(&nhges[i].nh_list);
+ new_nhges[j].stats = nhges[i].stats;
new_nhges[j].nh_parent = nhges[i].nh_parent;
new_nhges[j].nh = nhges[i].nh;
new_nhges[j].weight = nhges[i].weight;
@@ -1813,6 +2037,7 @@ static void remove_nh_grp_entry(struct net *net, struct nh_grp_entry *nhge,
rcu_assign_pointer(nhp->nh_grp, newg);
list_del(&nhge->nh_list);
+ free_percpu(nhge->stats);
nexthop_put(nhge->nh);
/* Removal of a NH from a resilient group is notified through
@@ -2477,6 +2702,13 @@ static struct nexthop *nexthop_create_group(struct net *net,
if (nhi->family == AF_INET)
nhg->has_v4 = true;
+ nhg->nh_entries[i].stats =
+ netdev_alloc_pcpu_stats(struct nh_grp_entry_stats);
+ if (!nhg->nh_entries[i].stats) {
+ err = -ENOMEM;
+ nexthop_put(nhe);
+ goto out_no_nh;
+ }
nhg->nh_entries[i].nh = nhe;
nhg->nh_entries[i].weight = entry[i].weight + 1;
list_add(&nhg->nh_entries[i].nh_list, &nhe->grp_list);
@@ -2509,6 +2741,9 @@ static struct nexthop *nexthop_create_group(struct net *net,
if (cfg->nh_fdb)
nhg->fdb_nh = 1;
+ if (cfg->nh_hw_stats)
+ nhg->hw_stats = true;
+
rcu_assign_pointer(nh->nh_grp, nhg);
return nh;
@@ -2516,6 +2751,7 @@ static struct nexthop *nexthop_create_group(struct net *net,
out_no_nh:
for (i--; i >= 0; --i) {
list_del(&nhg->nh_entries[i].nh_list);
+ free_percpu(nhg->nh_entries[i].stats);
nexthop_put(nhg->nh_entries[i].nh);
}
@@ -2850,6 +3086,9 @@ static int rtm_to_nh_config(struct net *net, struct sk_buff *skb,
err = rtm_to_nh_config_grp_res(tb[NHA_RES_GROUP],
cfg, extack);
+ if (tb[NHA_HW_STATS_ENABLE])
+ cfg->nh_hw_stats = nla_get_u32(tb[NHA_HW_STATS_ENABLE]);
+
/* no other attributes should be set */
goto out;
}
@@ -2941,6 +3180,10 @@ static int rtm_to_nh_config(struct net *net, struct sk_buff *skb,
goto out;
}
+ if (tb[NHA_HW_STATS_ENABLE]) {
+ NL_SET_ERR_MSG(extack, "Cannot enable nexthop hardware statistics for non-group nexthops");
+ goto out;
+ }
err = 0;
out:
@@ -2966,9 +3209,9 @@ static int rtm_new_nexthop(struct sk_buff *skb, struct nlmsghdr *nlh,
return err;
}
-static int __nh_valid_get_del_req(const struct nlmsghdr *nlh,
- struct nlattr **tb, u32 *id,
- struct netlink_ext_ack *extack)
+static int nh_valid_get_del_req(const struct nlmsghdr *nlh,
+ struct nlattr **tb, u32 *id, u32 *op_flags,
+ struct netlink_ext_ack *extack)
{
struct nhmsg *nhm = nlmsg_data(nlh);
@@ -2988,28 +3231,21 @@ static int __nh_valid_get_del_req(const struct nlmsghdr *nlh,
return -EINVAL;
}
- return 0;
-}
-
-static int nh_valid_get_del_req(const struct nlmsghdr *nlh, u32 *id,
- struct netlink_ext_ack *extack)
-{
- struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_get)];
- int err;
-
- err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb,
- ARRAY_SIZE(rtm_nh_policy_get) - 1,
- rtm_nh_policy_get, extack);
- if (err < 0)
- return err;
+ if (op_flags) {
+ if (tb[NHA_OP_FLAGS])
+ *op_flags = nla_get_u32(tb[NHA_OP_FLAGS]);
+ else
+ *op_flags = 0;
+ }
- return __nh_valid_get_del_req(nlh, tb, id, extack);
+ return 0;
}
/* rtnl */
static int rtm_del_nexthop(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
+ struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_del)];
struct net *net = sock_net(skb->sk);
struct nl_info nlinfo = {
.nlh = nlh,
@@ -3020,7 +3256,13 @@ static int rtm_del_nexthop(struct sk_buff *skb, struct nlmsghdr *nlh,
int err;
u32 id;
- err = nh_valid_get_del_req(nlh, &id, extack);
+ err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb,
+ ARRAY_SIZE(rtm_nh_policy_del) - 1, rtm_nh_policy_del,
+ extack);
+ if (err < 0)
+ return err;
+
+ err = nh_valid_get_del_req(nlh, tb, &id, NULL, extack);
if (err)
return err;
@@ -3037,13 +3279,21 @@ static int rtm_del_nexthop(struct sk_buff *skb, struct nlmsghdr *nlh,
static int rtm_get_nexthop(struct sk_buff *in_skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
+ struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_get)];
struct net *net = sock_net(in_skb->sk);
struct sk_buff *skb = NULL;
struct nexthop *nh;
+ u32 op_flags;
int err;
u32 id;
- err = nh_valid_get_del_req(nlh, &id, extack);
+ err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb,
+ ARRAY_SIZE(rtm_nh_policy_get) - 1, rtm_nh_policy_get,
+ extack);
+ if (err < 0)
+ return err;
+
+ err = nh_valid_get_del_req(nlh, tb, &id, &op_flags, extack);
if (err)
return err;
@@ -3058,7 +3308,7 @@ static int rtm_get_nexthop(struct sk_buff *in_skb, struct nlmsghdr *nlh,
goto errout_free;
err = nh_fill_node(skb, nh, RTM_NEWNEXTHOP, NETLINK_CB(in_skb).portid,
- nlh->nlmsg_seq, 0);
+ nlh->nlmsg_seq, 0, op_flags);
if (err < 0) {
WARN_ON(err == -EMSGSIZE);
goto errout_free;
@@ -3079,6 +3329,7 @@ struct nh_dump_filter {
bool group_filter;
bool fdb_filter;
u32 res_bucket_nh_id;
+ u32 op_flags;
};
static bool nh_dump_filtered(struct nexthop *nh,
@@ -3166,6 +3417,11 @@ static int nh_valid_dump_req(const struct nlmsghdr *nlh,
if (err < 0)
return err;
+ if (tb[NHA_OP_FLAGS])
+ filter->op_flags = nla_get_u32(tb[NHA_OP_FLAGS]);
+ else
+ filter->op_flags = 0;
+
return __nh_valid_dump_req(nlh, tb, filter, cb->extack);
}
@@ -3223,7 +3479,7 @@ static int rtm_dump_nexthop_cb(struct sk_buff *skb, struct netlink_callback *cb,
return nh_fill_node(skb, nh, RTM_NEWNEXTHOP,
NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq, NLM_F_MULTI);
+ cb->nlh->nlmsg_seq, NLM_F_MULTI, filter->op_flags);
}
/* rtnl */
@@ -3241,10 +3497,6 @@ static int rtm_dump_nexthop(struct sk_buff *skb, struct netlink_callback *cb)
err = rtm_dump_walk_nexthops(skb, cb, root, ctx,
&rtm_dump_nexthop_cb, &filter);
- if (err < 0) {
- if (likely(skb->len))
- err = skb->len;
- }
cb->seq = net->nexthop.seq;
nl_dump_check_consistent(cb, nlmsg_hdr(skb));
@@ -3439,11 +3691,6 @@ static int rtm_dump_nexthop_bucket(struct sk_buff *skb,
&rtm_dump_nexthop_bucket_cb, &dd);
}
- if (err < 0) {
- if (likely(skb->len))
- err = skb->len;
- }
-
cb->seq = net->nexthop.seq;
nl_dump_check_consistent(cb, nlmsg_hdr(skb));
return err;
@@ -3483,7 +3730,7 @@ static int nh_valid_get_bucket_req(const struct nlmsghdr *nlh,
if (err < 0)
return err;
- err = __nh_valid_get_del_req(nlh, tb, id, extack);
+ err = nh_valid_get_del_req(nlh, tb, id, NULL, extack);
if (err)
return err;
@@ -3631,17 +3878,24 @@ unlock:
}
EXPORT_SYMBOL(register_nexthop_notifier);
-int unregister_nexthop_notifier(struct net *net, struct notifier_block *nb)
+int __unregister_nexthop_notifier(struct net *net, struct notifier_block *nb)
{
int err;
- rtnl_lock();
err = blocking_notifier_chain_unregister(&net->nexthop.notifier_chain,
nb);
- if (err)
- goto unlock;
- nexthops_dump(net, nb, NEXTHOP_EVENT_DEL, NULL);
-unlock:
+ if (!err)
+ nexthops_dump(net, nb, NEXTHOP_EVENT_DEL, NULL);
+ return err;
+}
+EXPORT_SYMBOL(__unregister_nexthop_notifier);
+
+int unregister_nexthop_notifier(struct net *net, struct notifier_block *nb)
+{
+ int err;
+
+ rtnl_lock();
+ err = __unregister_nexthop_notifier(net, nb);
rtnl_unlock();
return err;
}
@@ -3737,16 +3991,20 @@ out:
}
EXPORT_SYMBOL(nexthop_res_grp_activity_update);
-static void __net_exit nexthop_net_exit_batch(struct list_head *net_list)
+static void __net_exit nexthop_net_exit_batch_rtnl(struct list_head *net_list,
+ struct list_head *dev_to_kill)
{
struct net *net;
- rtnl_lock();
- list_for_each_entry(net, net_list, exit_list) {
+ ASSERT_RTNL();
+ list_for_each_entry(net, net_list, exit_list)
flush_all_nexthops(net);
- kfree(net->nexthop.devhash);
- }
- rtnl_unlock();
+}
+
+static void __net_exit nexthop_net_exit(struct net *net)
+{
+ kfree(net->nexthop.devhash);
+ net->nexthop.devhash = NULL;
}
static int __net_init nexthop_net_init(struct net *net)
@@ -3764,7 +4022,8 @@ static int __net_init nexthop_net_init(struct net *net)
static struct pernet_operations nexthop_net_ops = {
.init = nexthop_net_init,
- .exit_batch = nexthop_net_exit_batch,
+ .exit = nexthop_net_exit,
+ .exit_batch_rtnl = nexthop_net_exit_batch_rtnl,
};
static int __init nexthop_init(void)
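
The nexthop group-entry counters follow the standard per-CPU u64_stats pattern: the hot path bumps its own CPU's counter inside an update section, and readers sum every CPU, retrying any CPU whose sequence moved mid-read (which only matters on 32-bit). A condensed sketch of both sides:

    struct example_stats {
            u64_stats_t packets;
            struct u64_stats_sync syncp;
    };

    static void example_inc(struct example_stats __percpu *stats)
    {
            struct example_stats *s = get_cpu_ptr(stats);

            u64_stats_update_begin(&s->syncp);
            u64_stats_inc(&s->packets);
            u64_stats_update_end(&s->syncp);
            put_cpu_ptr(s);
    }

    static u64 example_read(struct example_stats __percpu *stats)
    {
            u64 total = 0;
            int cpu;

            for_each_possible_cpu(cpu) {
                    struct example_stats *s = per_cpu_ptr(stats, cpu);
                    unsigned int start;
                    u64 packets;

                    do {    /* retry if a writer raced with us */
                            start = u64_stats_fetch_begin(&s->syncp);
                            packets = u64_stats_read(&s->packets);
                    } while (u64_stats_fetch_retry(&s->syncp, start));

                    total += packets;
            }
            return total;
    }
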
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 5f4654ebff48..914bc9c35cc7 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -395,7 +395,7 @@ static int snmp_seq_show_ipstats(struct seq_file *seq, void *v)
seq_printf(seq, " %s", snmp4_ipstats_list[i].name);
seq_printf(seq, "\nIp: %d %d",
- IPV4_DEVCONF_ALL(net, FORWARDING) ? 1 : 2,
+ IPV4_DEVCONF_ALL_RO(net, FORWARDING) ? 1 : 2,
READ_ONCE(net->ipv4.sysctl_ip_default_ttl));
BUILD_BUG_ON(offsetof(struct ipstats_mib, mibs) != 0);
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index aea89326c697..42ac434cfcfa 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -175,6 +175,13 @@ static int raw_v4_input(struct net *net, struct sk_buff *skb,
if (!raw_v4_match(net, sk, iph->protocol,
iph->saddr, iph->daddr, dif, sdif))
continue;
+
+ if (atomic_read(&sk->sk_rmem_alloc) >=
+ READ_ONCE(sk->sk_rcvbuf)) {
+ atomic_inc(&sk->sk_drops);
+ continue;
+ }
+
delivered = 1;
if ((iph->protocol != IPPROTO_ICMP || !icmp_filter(sk, skb)) &&
ip_mc_sf_allow(sk, iph->daddr, iph->saddr,
@@ -310,7 +317,7 @@ int raw_rcv(struct sock *sk, struct sk_buff *skb)
}
nf_reset_ct(skb);
- skb_push(skb, skb->data - skb_network_header(skb));
+ skb_push(skb, -skb_network_offset(skb));
raw_rcv_skb(sk, skb);
return 0;
@@ -353,6 +360,7 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4,
skb->priority = READ_ONCE(sk->sk_priority);
skb->mark = sockc->mark;
skb->tstamp = sockc->transmit_time;
+ skb->mono_delivery_time = !!skb->tstamp;
skb_dst_set(skb, &rt->dst);
*rtp = NULL;
@@ -815,7 +823,7 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
out: return ret;
}
-static int do_raw_setsockopt(struct sock *sk, int level, int optname,
+static int do_raw_setsockopt(struct sock *sk, int optname,
sockptr_t optval, unsigned int optlen)
{
if (optname == ICMP_FILTER) {
@@ -832,11 +840,11 @@ static int raw_setsockopt(struct sock *sk, int level, int optname,
{
if (level != SOL_RAW)
return ip_setsockopt(sk, level, optname, optval, optlen);
- return do_raw_setsockopt(sk, level, optname, optval, optlen);
+ return do_raw_setsockopt(sk, optname, optval, optlen);
}
-static int do_raw_getsockopt(struct sock *sk, int level, int optname,
- char __user *optval, int __user *optlen)
+static int do_raw_getsockopt(struct sock *sk, int optname,
+ char __user *optval, int __user *optlen)
{
if (optname == ICMP_FILTER) {
if (inet_sk(sk)->inet_num != IPPROTO_ICMP)
@@ -852,7 +860,7 @@ static int raw_getsockopt(struct sock *sk, int level, int optname,
{
if (level != SOL_RAW)
return ip_getsockopt(sk, level, optname, optval, optlen);
- return do_raw_getsockopt(sk, level, optname, optval, optlen);
+ return do_raw_getsockopt(sk, optname, optval, optlen);
}
static int raw_ioctl(struct sock *sk, int cmd, int *karg)
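
The skb_push() rewrite in raw_rcv() is cosmetic; both expressions push the network header back into the data area, since skb_network_offset() is defined as skb_network_header(skb) - skb->data:

    skb_push(skb, skb->data - skb_network_header(skb)); /* old */
    skb_push(skb, -skb_network_offset(skb));            /* new, same length */
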
diff --git a/net/ipv4/raw_diag.c b/net/ipv4/raw_diag.c
index fe2140c8375c..cc793bd8de25 100644
--- a/net/ipv4/raw_diag.c
+++ b/net/ipv4/raw_diag.c
@@ -213,6 +213,7 @@ static int raw_diag_destroy(struct sk_buff *in_skb,
#endif
static const struct inet_diag_handler raw_diag_handler = {
+ .owner = THIS_MODULE,
.dump = raw_diag_dump,
.dump_one = raw_diag_dump_one,
.idiag_get_info = raw_diag_get_info,
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 16615d107cf0..c8f76f56dc16 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2313,7 +2313,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
if (IN_DEV_BFORWARD(in_dev))
goto make_route;
/* not do cache if bc_forwarding is enabled */
- if (IPV4_DEVCONF_ALL(net, BC_FORWARDING))
+ if (IPV4_DEVCONF_ALL_RO(net, BC_FORWARDING))
do_cache = false;
goto brd_input;
}
@@ -2993,7 +2993,7 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
#ifdef CONFIG_IP_MROUTE
if (ipv4_is_multicast(dst) &&
!ipv4_is_local_multicast(dst) &&
- IPV4_DEVCONF_ALL(net, MC_FORWARDING)) {
+ IPV4_DEVCONF_ALL_RO(net, MC_FORWARDING)) {
int err = ipmr_get_route(net, skb,
fl4->saddr, fl4->daddr,
r, portid);
@@ -3693,9 +3693,8 @@ int __init ip_rt_init(void)
panic("IP: failed to allocate ip_rt_acct\n");
#endif
- ipv4_dst_ops.kmem_cachep =
- kmem_cache_create("ip_dst_cache", sizeof(struct rtable), 0,
- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+ ipv4_dst_ops.kmem_cachep = KMEM_CACHE(rtable,
+ SLAB_HWCACHE_ALIGN | SLAB_PANIC);
ipv4_dst_blackhole_ops.kmem_cachep = ipv4_dst_ops.kmem_cachep;
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 61f1c96cfe63..7972ad3d7c73 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -51,15 +51,6 @@ static u32 cookie_hash(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport,
count, &syncookie_secret[c]);
}
-/* Convert one nsec 64bit timestamp to ts (ms or usec resolution) */
-static u64 tcp_ns_to_ts(bool usec_ts, u64 val)
-{
- if (usec_ts)
- return div_u64(val, NSEC_PER_USEC);
-
- return div_u64(val, NSEC_PER_MSEC);
-}
-
/*
* when syncookies are in effect and tcp timestamps are enabled we encode
* tcp options in the lower bits of the timestamp value that will be
@@ -304,6 +295,24 @@ static int cookie_tcp_reqsk_init(struct sock *sk, struct sk_buff *skb,
return 0;
}
+#if IS_ENABLED(CONFIG_BPF)
+struct request_sock *cookie_bpf_check(struct sock *sk, struct sk_buff *skb)
+{
+ struct request_sock *req = inet_reqsk(skb->sk);
+
+ skb->sk = NULL;
+ skb->destructor = NULL;
+
+ if (cookie_tcp_reqsk_init(sk, skb, req)) {
+ reqsk_free(req);
+ req = NULL;
+ }
+
+ return req;
+}
+EXPORT_SYMBOL_GPL(cookie_bpf_check);
+#endif
+
struct request_sock *cookie_tcp_reqsk_alloc(const struct request_sock_ops *ops,
struct sock *sk, struct sk_buff *skb,
struct tcp_options_received *tcp_opt,
@@ -399,16 +408,23 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
struct rtable *rt;
__u8 rcv_wscale;
int full_space;
+ SKB_DR(reason);
if (!READ_ONCE(net->ipv4.sysctl_tcp_syncookies) ||
!th->ack || th->rst)
goto out;
- req = cookie_tcp_check(net, sk, skb);
- if (IS_ERR(req))
- goto out;
- if (!req)
+ if (cookie_bpf_ok(skb)) {
+ req = cookie_bpf_check(sk, skb);
+ } else {
+ req = cookie_tcp_check(net, sk, skb);
+ if (IS_ERR(req))
+ goto out;
+ }
+ if (!req) {
+ SKB_DR_SET(reason, NO_SOCKET);
goto out_drop;
+ }
ireq = inet_rsk(req);
@@ -420,8 +436,10 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
*/
RCU_INIT_POINTER(ireq->ireq_opt, tcp_v4_save_options(net, skb));
- if (security_inet_conn_request(sk, skb, req))
+ if (security_inet_conn_request(sk, skb, req)) {
+ SKB_DR_SET(reason, SECURITY_HOOK);
goto out_free;
+ }
tcp_ao_syncookie(sk, skb, req, AF_INET);
@@ -438,8 +456,10 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
ireq->ir_loc_addr, th->source, th->dest, sk->sk_uid);
security_req_classify_flow(req, flowi4_to_flowi_common(&fl4));
rt = ip_route_output_key(net, &fl4);
- if (IS_ERR(rt))
+ if (IS_ERR(rt)) {
+ SKB_DR_SET(reason, IP_OUTNOROUTES);
goto out_free;
+ }
/* Try to redo what tcp_v4_send_synack did. */
req->rsk_window_clamp = tp->window_clamp ? :dst_metric(&rt->dst, RTAX_WINDOW);
@@ -454,19 +474,24 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
ireq->wscale_ok, &rcv_wscale,
dst_metric(&rt->dst, RTAX_INITRWND));
- ireq->rcv_wscale = rcv_wscale;
+ if (!req->syncookie)
+ ireq->rcv_wscale = rcv_wscale;
ireq->ecn_ok &= cookie_ecn_ok(net, &rt->dst);
ret = tcp_get_cookie_sock(sk, skb, req, &rt->dst);
/* ip_queue_xmit() depends on our flow being setup
* Normal sockets get it right from inet_csk_route_child_sock()
*/
- if (ret)
- inet_sk(ret)->cork.fl.u.ip4 = fl4;
+ if (!ret) {
+ SKB_DR_SET(reason, NO_SOCKET);
+ goto out_drop;
+ }
+ inet_sk(ret)->cork.fl.u.ip4 = fl4;
out:
return ret;
out_free:
reqsk_free(req);
out_drop:
+ kfree_skb_reason(skb, reason);
return NULL;
}
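The syncookie conversion above leans on the drop-reason helpers that recur through the rest of this series. Paraphrasing <net/dropreason-core.h>: SKB_DR() declares a reason variable preset to NOT_SPECIFIED, SKB_DR_SET() pastes a short name onto the SKB_DROP_REASON_ prefix, and kfree_skb_reason() finally attributes the drop:

	/* paraphrased from <net/dropreason-core.h> */
	#define SKB_DR_INIT(name, reason)				\
		enum skb_drop_reason name = SKB_DROP_REASON_##reason
	#define SKB_DR(name)						\
		SKB_DR_INIT(name, NOT_SPECIFIED)
	#define SKB_DR_SET(name, reason)				\
		(name = SKB_DROP_REASON_##reason)

So the SKB_DR(reason) at the top of cookie_v4_check() guarantees that any path reaching the new kfree_skb_reason() call reports at least NOT_SPECIFIED.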
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index c82dc42f57c6..d20b62d52171 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -279,6 +279,7 @@
#include <linux/uaccess.h>
#include <asm/ioctls.h>
#include <net/busy_poll.h>
+#include <net/rps.h>
/* Track pending CMSGs. */
enum {
@@ -974,7 +975,7 @@ int tcp_wmem_schedule(struct sock *sk, int copy)
* Use whatever is left in sk->sk_forward_alloc and tcp_wmem[0]
* to guarantee some progress.
*/
- left = sock_net(sk)->ipv4.sysctl_tcp_wmem[0] - sk->sk_wmem_queued;
+ left = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_wmem[0]) - sk->sk_wmem_queued;
if (left > 0)
sk_forced_mem_schedule(sk, min(left, copy));
return min(copy, sk->sk_forward_alloc);
@@ -4010,11 +4011,11 @@ int do_tcp_getsockopt(struct sock *sk, int level,
if (copy_from_sockptr(&len, optlen, sizeof(int)))
return -EFAULT;
- len = min_t(unsigned int, len, sizeof(int));
-
if (len < 0)
return -EINVAL;
+ len = min_t(unsigned int, len, sizeof(int));
+
switch (optname) {
case TCP_MAXSEG:
val = tp->mss_cache;
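The reorder above in do_tcp_getsockopt(), repeated in udp_lib_getsockopt() further down, fixes an order-of-checks bug: min_t(unsigned int, ...) compares both operands as unsigned, so a negative len wrapped to a huge value, got clamped to sizeof(int), and then sailed past the len < 0 rejection. A standalone sketch of the pitfall, with min_t() re-derived for illustration:

	#include <stdio.h>

	#define min_t(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

	int main(void)
	{
		int len = -1;		/* hostile length from userspace */

		/* old order: clamp first; (unsigned int)-1 wraps, so len
		 * becomes 4 and the later len < 0 test can never fire */
		len = min_t(unsigned int, len, sizeof(int));
		printf("after clamp: len=%d, negative? %d\n", len, len < 0);

		/* new order: reject first, clamp afterwards */
		len = -1;
		if (len < 0)
			puts("-EINVAL returned before any clamping");
		return 0;
	}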
@@ -4651,7 +4652,7 @@ static void __init tcp_struct_check(void)
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, tsorted_sent_queue);
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, highest_sack);
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, ecn_flags);
- CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_write_tx, 113);
+ CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_write_tx, 105);
/* TXRX read-write hotpath cache lines */
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, pred_flags);
diff --git a/net/ipv4/tcp_ao.c b/net/ipv4/tcp_ao.c
index 87db432c6bb4..3afeeb68e8a7 100644
--- a/net/ipv4/tcp_ao.c
+++ b/net/ipv4/tcp_ao.c
@@ -509,9 +509,9 @@ static int tcp_ao_hash_header(struct tcp_sigpool *hp,
bool exclude_options, u8 *hash,
int hash_offset, int hash_len)
{
- int err, len = th->doff << 2;
struct scatterlist sg;
u8 *hdr = hp->scratch;
+ int err, len;
/* We are not allowed to change tcphdr, make a local copy */
if (exclude_options) {
diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
index 22358032dd48..05dc2d05bc7c 100644
--- a/net/ipv4/tcp_bbr.c
+++ b/net/ipv4/tcp_bbr.c
@@ -1155,7 +1155,7 @@ static struct tcp_congestion_ops tcp_bbr_cong_ops __read_mostly = {
.set_state = bbr_set_state,
};
-BTF_SET8_START(tcp_bbr_check_kfunc_ids)
+BTF_KFUNCS_START(tcp_bbr_check_kfunc_ids)
#ifdef CONFIG_X86
#ifdef CONFIG_DYNAMIC_FTRACE
BTF_ID_FLAGS(func, bbr_init)
@@ -1168,7 +1168,7 @@ BTF_ID_FLAGS(func, bbr_min_tso_segs)
BTF_ID_FLAGS(func, bbr_set_state)
#endif
#endif
-BTF_SET8_END(tcp_bbr_check_kfunc_ids)
+BTF_KFUNCS_END(tcp_bbr_check_kfunc_ids)
static const struct btf_kfunc_id_set tcp_bbr_kfunc_set = {
.owner = THIS_MODULE,
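tcp_bbr here, and tcp_cubic and tcp_dctcp below, make the same mechanical swap: BTF_SET8_START/END becomes BTF_KFUNCS_START/END, which, as I read it, marks the ID set itself as a kfunc set rather than leaving that implicit in how it is registered. A hedged sketch of the surrounding boilerplate, with hypothetical names:

	/* hypothetical module exposing one function to BPF struct_ops */
	BTF_KFUNCS_START(example_kfunc_ids)
	BTF_ID_FLAGS(func, example_cong_avoid)
	BTF_KFUNCS_END(example_kfunc_ids)

	static const struct btf_kfunc_id_set example_kfunc_set = {
		.owner = THIS_MODULE,
		.set   = &example_kfunc_ids,
	};

	static int __init example_init(void)
	{
		return register_btf_kfunc_id_set(BTF_KFUNC_HOOK_STRUCT_OPS,
						 &example_kfunc_set);
	}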
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 1b34050a7538..28ffcfbeef14 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -146,11 +146,7 @@ EXPORT_SYMBOL_GPL(tcp_unregister_congestion_control);
int tcp_update_congestion_control(struct tcp_congestion_ops *ca, struct tcp_congestion_ops *old_ca)
{
struct tcp_congestion_ops *existing;
- int ret;
-
- ret = tcp_validate_congestion_control(ca);
- if (ret)
- return ret;
+ int ret = 0;
ca->key = jhash(ca->name, sizeof(ca->name), strlen(ca->name));
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index 0fd78ecb67e7..44869ea089e3 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -485,7 +485,7 @@ static struct tcp_congestion_ops cubictcp __read_mostly = {
.name = "cubic",
};
-BTF_SET8_START(tcp_cubic_check_kfunc_ids)
+BTF_KFUNCS_START(tcp_cubic_check_kfunc_ids)
#ifdef CONFIG_X86
#ifdef CONFIG_DYNAMIC_FTRACE
BTF_ID_FLAGS(func, cubictcp_init)
@@ -496,7 +496,7 @@ BTF_ID_FLAGS(func, cubictcp_cwnd_event)
BTF_ID_FLAGS(func, cubictcp_acked)
#endif
#endif
-BTF_SET8_END(tcp_cubic_check_kfunc_ids)
+BTF_KFUNCS_END(tcp_cubic_check_kfunc_ids)
static const struct btf_kfunc_id_set tcp_cubic_kfunc_set = {
.owner = THIS_MODULE,
diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c
index bb23bb5b387a..e33fbe4933e4 100644
--- a/net/ipv4/tcp_dctcp.c
+++ b/net/ipv4/tcp_dctcp.c
@@ -260,7 +260,7 @@ static struct tcp_congestion_ops dctcp_reno __read_mostly = {
.name = "dctcp-reno",
};
-BTF_SET8_START(tcp_dctcp_check_kfunc_ids)
+BTF_KFUNCS_START(tcp_dctcp_check_kfunc_ids)
#ifdef CONFIG_X86
#ifdef CONFIG_DYNAMIC_FTRACE
BTF_ID_FLAGS(func, dctcp_init)
@@ -271,7 +271,7 @@ BTF_ID_FLAGS(func, dctcp_cwnd_undo)
BTF_ID_FLAGS(func, dctcp_state)
#endif
#endif
-BTF_SET8_END(tcp_dctcp_check_kfunc_ids)
+BTF_KFUNCS_END(tcp_dctcp_check_kfunc_ids)
static const struct btf_kfunc_id_set tcp_dctcp_kfunc_set = {
.owner = THIS_MODULE,
diff --git a/net/ipv4/tcp_diag.c b/net/ipv4/tcp_diag.c
index 4cbe4b44425a..f428ecf9120f 100644
--- a/net/ipv4/tcp_diag.c
+++ b/net/ipv4/tcp_diag.c
@@ -222,6 +222,7 @@ static int tcp_diag_destroy(struct sk_buff *in_skb,
#endif
static const struct inet_diag_handler tcp_diag_handler = {
+ .owner = THIS_MODULE,
.dump = tcp_diag_dump,
.dump_one = tcp_diag_dump_one,
.idiag_get_info = tcp_diag_get_info,
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index df7b13f0e5e0..5d874817a78d 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1164,7 +1164,7 @@ static void tcp_count_delivered(struct tcp_sock *tp, u32 delivered,
* L|R 1 - orig is lost, retransmit is in flight.
* S|R 1 - orig reached receiver, retrans is still in flight.
* (L|S|R is logically valid, it could occur when L|R is sacked,
- * but it is equivalent to plain S and code short-curcuits it to S.
+ * but it is equivalent to plain S and code short-circuits it to S.
* L|S is logically invalid, it would mean -1 packet in flight 8))
*
* These 6 states form finite state machine, controlled by the following events:
@@ -6361,6 +6361,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
inet_csk_reset_xmit_timer(sk,
ICSK_TIME_RETRANS,
TCP_TIMEOUT_MIN, TCP_RTO_MAX);
+ SKB_DR_SET(reason, TCP_INVALID_ACK_SEQUENCE);
goto reset_and_undo;
}
@@ -6369,6 +6370,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
tcp_time_stamp_ts(tp))) {
NET_INC_STATS(sock_net(sk),
LINUX_MIB_PAWSACTIVEREJECTED);
+ SKB_DR_SET(reason, TCP_RFC7323_PAWS);
goto reset_and_undo;
}
@@ -6572,7 +6574,8 @@ discard_and_undo:
reset_and_undo:
tcp_clear_options(&tp->rx_opt);
tp->rx_opt.mss_clamp = saved_clamp;
- return 1;
+ /* we can reuse @reason and return it so the caller can handle the exception */
+ return reason;
}
static void tcp_rcv_synrecv_state_fastopen(struct sock *sk)
@@ -6616,14 +6619,14 @@ static void tcp_rcv_synrecv_state_fastopen(struct sock *sk)
* address independent.
*/
-int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
+enum skb_drop_reason
+tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
{
struct tcp_sock *tp = tcp_sk(sk);
struct inet_connection_sock *icsk = inet_csk(sk);
const struct tcphdr *th = tcp_hdr(skb);
struct request_sock *req;
int queued = 0;
- bool acceptable;
SKB_DR(reason);
switch (sk->sk_state) {
@@ -6633,7 +6636,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
case TCP_LISTEN:
if (th->ack)
- return 1;
+ return SKB_DROP_REASON_TCP_FLAGS;
if (th->rst) {
SKB_DR_SET(reason, TCP_RESET);
@@ -6649,12 +6652,10 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
*/
rcu_read_lock();
local_bh_disable();
- acceptable = icsk->icsk_af_ops->conn_request(sk, skb) >= 0;
+ icsk->icsk_af_ops->conn_request(sk, skb);
local_bh_enable();
rcu_read_unlock();
- if (!acceptable)
- return 1;
consume_skb(skb);
return 0;
}
@@ -6699,17 +6700,25 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
return 0;
/* step 5: check the ACK field */
- acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH |
- FLAG_UPDATE_TS_RECENT |
- FLAG_NO_CHALLENGE_ACK) > 0;
-
- if (!acceptable) {
- if (sk->sk_state == TCP_SYN_RECV)
- return 1; /* send one RST */
- tcp_send_challenge_ack(sk);
- SKB_DR_SET(reason, TCP_OLD_ACK);
- goto discard;
+ reason = tcp_ack(sk, skb, FLAG_SLOWPATH |
+ FLAG_UPDATE_TS_RECENT |
+ FLAG_NO_CHALLENGE_ACK);
+
+ if ((int)reason <= 0) {
+ if (sk->sk_state == TCP_SYN_RECV) {
+ /* send one RST */
+ if (!reason)
+ return SKB_DROP_REASON_TCP_OLD_ACK;
+ return -reason;
+ }
+ /* accept old ack during closing */
+ if ((int)reason < 0) {
+ tcp_send_challenge_ack(sk);
+ reason = -reason;
+ goto discard;
+ }
}
+ SKB_DR_SET(reason, NOT_SPECIFIED);
switch (sk->sk_state) {
case TCP_SYN_RECV:
tp->delivered++; /* SYN-ACK delivery isn't tracked in tcp_ack */
@@ -6777,7 +6786,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
if (READ_ONCE(tp->linger2) < 0) {
tcp_done(sk);
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
- return 1;
+ return SKB_DROP_REASON_TCP_ABORT_ON_DATA;
}
if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) {
@@ -6786,7 +6795,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
tcp_fastopen_active_disable(sk);
tcp_done(sk);
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
- return 1;
+ return SKB_DROP_REASON_TCP_ABORT_ON_DATA;
}
tmo = tcp_fin_time(sk);
@@ -6851,7 +6860,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) {
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
tcp_reset(sk, skb);
- return 1;
+ return SKB_DROP_REASON_TCP_ABORT_ON_DATA;
}
}
fallthrough;
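The tcp_input.c changes re-type tcp_rcv_state_process() to return enum skb_drop_reason: zero (SKB_NOT_DROPPED_YET) still means the segment was handled, and any positive value both names the drop and tells callers such as tcp_v4_do_rcv() to send a RST. The (int)reason <= 0 branch additionally decodes tcp_ack()'s signed result; my reading of that convention, as a sketch rather than a quote from the tree:

	/* tcp_ack() outcome as the hunk above interprets it:
	 *   > 0   ACK acceptable, keep processing
	 *   == 0  stale ACK, reported as TCP_OLD_ACK
	 *   < 0   a negated skb_drop_reason explaining the rejection
	 */
	static enum skb_drop_reason decode_tcp_ack(int ret)
	{
		if (ret > 0)
			return SKB_NOT_DROPPED_YET;
		if (ret == 0)
			return SKB_DROP_REASON_TCP_OLD_ACK;
		return (enum skb_drop_reason)-ret;
	}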
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 0c50c5a32b84..a22ee5838751 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1907,7 +1907,6 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
return 0;
}
- reason = SKB_DROP_REASON_NOT_SPECIFIED;
if (tcp_checksum_complete(skb))
goto csum_err;
@@ -1915,9 +1914,10 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
struct sock *nsk = tcp_v4_cookie_check(sk, skb);
if (!nsk)
- goto discard;
+ return 0;
if (nsk != sk) {
- if (tcp_child_process(sk, nsk, skb)) {
+ reason = tcp_child_process(sk, nsk, skb);
+ if (reason) {
rsk = nsk;
goto reset;
}
@@ -1926,7 +1926,8 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
} else
sock_rps_save_rxhash(sk, skb);
- if (tcp_rcv_state_process(sk, skb)) {
+ reason = tcp_rcv_state_process(sk, skb);
+ if (reason) {
rsk = sk;
goto reset;
}
@@ -2275,10 +2276,12 @@ process:
if (nsk == sk) {
reqsk_put(req);
tcp_v4_restore_cb(skb);
- } else if (tcp_child_process(sk, nsk, skb)) {
- tcp_v4_send_reset(nsk, skb);
- goto discard_and_relse;
} else {
+ drop_reason = tcp_child_process(sk, nsk, skb);
+ if (drop_reason) {
+ tcp_v4_send_reset(nsk, skb);
+ goto discard_and_relse;
+ }
sock_put(sk);
return 0;
}
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 9e85f2a0bddd..52040b0e2616 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -911,11 +911,11 @@ EXPORT_SYMBOL(tcp_check_req);
* be created.
*/
-int tcp_child_process(struct sock *parent, struct sock *child,
- struct sk_buff *skb)
+enum skb_drop_reason tcp_child_process(struct sock *parent, struct sock *child,
+ struct sk_buff *skb)
__releases(&((child)->sk_lock.slock))
{
- int ret = 0;
+ enum skb_drop_reason reason = SKB_NOT_DROPPED_YET;
int state = child->sk_state;
/* record sk_napi_id and sk_rx_queue_mapping of child. */
@@ -923,7 +923,7 @@ int tcp_child_process(struct sock *parent, struct sock *child,
tcp_segs_in(tcp_sk(child), skb);
if (!sock_owned_by_user(child)) {
- ret = tcp_rcv_state_process(child, skb);
+ reason = tcp_rcv_state_process(child, skb);
/* Wakeup parent, send SIGIO */
if (state == TCP_SYN_RECV && child->sk_state != state)
parent->sk_data_ready(parent);
@@ -937,6 +937,6 @@ int tcp_child_process(struct sock *parent, struct sock *child,
bh_unlock_sock(child);
sock_put(child);
- return ret;
+ return reason;
}
EXPORT_SYMBOL(tcp_child_process);
diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
index 8311c38267b5..ebe4722bb020 100644
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -204,7 +204,7 @@ struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb)
goto out;
hlen = off + thlen;
- if (skb_gro_header_hard(skb, hlen)) {
+ if (!skb_gro_may_pull(skb, hlen)) {
th = skb_gro_header_slow(skb, hlen, off);
if (unlikely(!th))
goto out;
@@ -299,18 +299,20 @@ out:
void tcp_gro_complete(struct sk_buff *skb)
{
struct tcphdr *th = tcp_hdr(skb);
+ struct skb_shared_info *shinfo;
+
+ if (skb->encapsulation)
+ skb->inner_transport_header = skb->transport_header;
skb->csum_start = (unsigned char *)th - skb->head;
skb->csum_offset = offsetof(struct tcphdr, check);
skb->ip_summed = CHECKSUM_PARTIAL;
- skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
+ shinfo = skb_shinfo(skb);
+ shinfo->gso_segs = NAPI_GRO_CB(skb)->count;
if (th->cwr)
- skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
-
- if (skb->encapsulation)
- skb->inner_transport_header = skb->transport_header;
+ shinfo->gso_type |= SKB_GSO_TCP_ECN;
}
EXPORT_SYMBOL(tcp_gro_complete);
@@ -335,24 +337,22 @@ INDIRECT_CALLABLE_SCOPE int tcp4_gro_complete(struct sk_buff *skb, int thoff)
th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr,
iph->daddr, 0);
- skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
- if (NAPI_GRO_CB(skb)->is_atomic)
- skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID;
+ skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4 |
+ (NAPI_GRO_CB(skb)->is_atomic * SKB_GSO_TCP_FIXEDID);
tcp_gro_complete(skb);
return 0;
}
-static const struct net_offload tcpv4_offload = {
- .callbacks = {
- .gso_segment = tcp4_gso_segment,
- .gro_receive = tcp4_gro_receive,
- .gro_complete = tcp4_gro_complete,
- },
-};
-
int __init tcpv4_offload_init(void)
{
- return inet_add_offload(&tcpv4_offload, IPPROTO_TCP);
+ net_hotdata.tcpv4_offload = (struct net_offload) {
+ .callbacks = {
+ .gso_segment = tcp4_gso_segment,
+ .gro_receive = tcp4_gro_receive,
+ .gro_complete = tcp4_gro_complete,
+ },
+ };
+ return inet_add_offload(&net_hotdata.tcpv4_offload, IPPROTO_TCP);
}
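tcp_offload.c above, and udp_offload.c below, drop their file-local static net_offload in favour of a slot in net_hotdata, collecting hot-path networking globals in one place. The assignment uses a C compound literal, which zero-fills every member not named; a tiny standalone illustration of the idiom, names hypothetical:

	#include <stdio.h>

	struct callbacks { int (*rx)(void); int (*tx)(void); };
	struct offload   { struct callbacks cb; int flags; };

	static int rx_handler(void) { return 0; }

	int main(void)
	{
		struct offload off;

		off = (struct offload) {
			.cb.rx = rx_handler,	/* .cb.tx and .flags become 0 */
		};
		printf("tx set? %d  flags=%d\n", off.cb.tx != NULL, off.flags);
		return 0;
	}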
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index e474b201900f..661d0e0d273f 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -411,8 +411,6 @@ INDIRECT_CALLABLE_SCOPE
u32 udp_ehashfn(const struct net *net, const __be32 laddr, const __u16 lport,
const __be32 faddr, const __be16 fport)
{
- static u32 udp_ehash_secret __read_mostly;
-
net_get_random_once(&udp_ehash_secret, sizeof(udp_ehash_secret));
return __inet_ehashfn(laddr, lport, faddr, fport,
@@ -1589,7 +1587,8 @@ int udp_init_sock(struct sock *sk)
void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len)
{
- sk_peek_offset_bwd(sk, len);
+ if (unlikely(READ_ONCE(udp_sk(sk)->peeking_with_offset)))
+ sk_peek_offset_bwd(sk, len);
if (!skb_unref(skb))
return;
@@ -2569,11 +2568,12 @@ int udp_v4_early_demux(struct sk_buff *skb)
uh->source, iph->saddr, dif, sdif);
}
- if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
+ if (!sk)
return 0;
skb->sk = sk;
- skb->destructor = sock_efree;
+ DEBUG_NET_WARN_ON_ONCE(sk_is_refcounted(sk));
+ skb->destructor = sock_pfree;
dst = rcu_dereference(sk->sk_rx_dst);
if (dst)
@@ -2792,11 +2792,11 @@ int udp_lib_getsockopt(struct sock *sk, int level, int optname,
if (get_user(len, optlen))
return -EFAULT;
- len = min_t(unsigned int, len, sizeof(int));
-
if (len < 0)
return -EINVAL;
+ len = min_t(unsigned int, len, sizeof(int));
+
switch (optname) {
case UDP_CORK:
val = udp_test_bit(CORK, sk);
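The skb_consume_udp() hunk above makes the peek-offset rewind conditional on the new peeking_with_offset bit, so sockets that never touched SO_PEEK_OFF skip that bookkeeping entirely. For reference, a hedged userspace sketch of the feature being optimized, with the send side and error handling omitted:

	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <arpa/inet.h>
	#include <unistd.h>

	int main(void)
	{
		int off = 0;
		char buf[64];
		struct sockaddr_in a = {
			.sin_family = AF_INET,
			.sin_addr.s_addr = htonl(INADDR_LOOPBACK),
		};
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		bind(fd, (struct sockaddr *)&a, sizeof(a));
		/* opt in: each MSG_PEEK now advances the peek offset */
		setsockopt(fd, SOL_SOCKET, SO_PEEK_OFF, &off, sizeof(off));
		/* ...queue two datagrams to ourselves, then: */
		recv(fd, buf, sizeof(buf), MSG_PEEK);	/* peeks datagram #1 */
		recv(fd, buf, sizeof(buf), MSG_PEEK);	/* peeks datagram #2 */
		close(fd);
		return 0;
	}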
diff --git a/net/ipv4/udp_diag.c b/net/ipv4/udp_diag.c
index dc41a22ee80e..38cb3a28e4ed 100644
--- a/net/ipv4/udp_diag.c
+++ b/net/ipv4/udp_diag.c
@@ -237,6 +237,7 @@ static int udplite_diag_destroy(struct sk_buff *in_skb,
#endif
static const struct inet_diag_handler udp_diag_handler = {
+ .owner = THIS_MODULE,
.dump = udp_diag_dump,
.dump_one = udp_diag_dump_one,
.idiag_get_info = udp_diag_get_info,
@@ -260,6 +261,7 @@ static int udplite_diag_dump_one(struct netlink_callback *cb,
}
static const struct inet_diag_handler udplite_diag_handler = {
+ .owner = THIS_MODULE,
.dump = udplite_diag_dump,
.dump_one = udplite_diag_dump_one,
.idiag_get_info = udp_diag_get_info,
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 6c95d28d0c4a..b9880743765c 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -737,15 +737,14 @@ INDIRECT_CALLABLE_SCOPE int udp4_gro_complete(struct sk_buff *skb, int nhoff)
return udp_gro_complete(skb, nhoff, udp4_lib_lookup_skb);
}
-static const struct net_offload udpv4_offload = {
- .callbacks = {
- .gso_segment = udp4_ufo_fragment,
- .gro_receive = udp4_gro_receive,
- .gro_complete = udp4_gro_complete,
- },
-};
-
int __init udpv4_offload_init(void)
{
- return inet_add_offload(&udpv4_offload, IPPROTO_UDP);
+ net_hotdata.udpv4_offload = (struct net_offload) {
+ .callbacks = {
+ .gso_segment = udp4_ufo_fragment,
+ .gro_receive = udp4_gro_receive,
+ .gro_complete = udp4_gro_complete,
+ },
+ };
+ return inet_add_offload(&net_hotdata.udpv4_offload, IPPROTO_UDP);
}
diff --git a/net/ipv4/xfrm4_input.c b/net/ipv4/xfrm4_input.c
index c54676998eb6..dae35101d189 100644
--- a/net/ipv4/xfrm4_input.c
+++ b/net/ipv4/xfrm4_input.c
@@ -58,7 +58,7 @@ int xfrm4_transport_finish(struct sk_buff *skb, int async)
return -iph->protocol;
#endif
- __skb_push(skb, skb->data - skb_network_header(skb));
+ __skb_push(skb, -skb_network_offset(skb));
iph->tot_len = htons(skb->len);
ip_send_check(iph);
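raw_rcv() earlier in this patch and xfrm4_transport_finish() here make the same substitution: skb->data - skb_network_header(skb) becomes -skb_network_offset(skb). The two are equal by definition, since <linux/skbuff.h> defines the offset as network header minus data; as a one-line sketch:

	static inline unsigned int example_push_len(const struct sk_buff *skb)
	{
		/* skb_network_offset() == skb_network_header(skb) - skb->data,
		 * so its negation is the push length that rewinds skb->data
		 * back to the IP header */
		return -skb_network_offset(skb);
	}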
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 055230b669cf..247bd4d8ee45 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -195,6 +195,7 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = {
.use_tempaddr = 0,
.temp_valid_lft = TEMP_VALID_LIFETIME,
.temp_prefered_lft = TEMP_PREFERRED_LIFETIME,
+ .regen_min_advance = REGEN_MIN_ADVANCE,
.regen_max_retry = REGEN_MAX_RETRY,
.max_desync_factor = MAX_DESYNC_FACTOR,
.max_addresses = IPV6_MAX_ADDRESSES,
@@ -257,6 +258,7 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
.use_tempaddr = 0,
.temp_valid_lft = TEMP_VALID_LIFETIME,
.temp_prefered_lft = TEMP_PREFERRED_LIFETIME,
+ .regen_min_advance = REGEN_MIN_ADVANCE,
.regen_max_retry = REGEN_MAX_RETRY,
.max_desync_factor = MAX_DESYNC_FACTOR,
.max_addresses = IPV6_MAX_ADDRESSES,
@@ -549,7 +551,8 @@ static int inet6_netconf_fill_devconf(struct sk_buff *skb, int ifindex,
goto out;
if ((all || type == NETCONFA_FORWARDING) &&
- nla_put_s32(skb, NETCONFA_FORWARDING, devconf->forwarding) < 0)
+ nla_put_s32(skb, NETCONFA_FORWARDING,
+ READ_ONCE(devconf->forwarding)) < 0)
goto nla_put_failure;
#ifdef CONFIG_IPV6_MROUTE
if ((all || type == NETCONFA_MC_FORWARDING) &&
@@ -558,12 +561,13 @@ static int inet6_netconf_fill_devconf(struct sk_buff *skb, int ifindex,
goto nla_put_failure;
#endif
if ((all || type == NETCONFA_PROXY_NEIGH) &&
- nla_put_s32(skb, NETCONFA_PROXY_NEIGH, devconf->proxy_ndp) < 0)
+ nla_put_s32(skb, NETCONFA_PROXY_NEIGH,
+ READ_ONCE(devconf->proxy_ndp)) < 0)
goto nla_put_failure;
if ((all || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN) &&
nla_put_s32(skb, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
- devconf->ignore_routes_with_linkdown) < 0)
+ READ_ONCE(devconf->ignore_routes_with_linkdown)) < 0)
goto nla_put_failure;
out:
@@ -713,7 +717,7 @@ errout:
static u32 inet6_base_seq(const struct net *net)
{
u32 res = atomic_read(&net->ipv6.dev_addr_genid) +
- net->dev_base_seq;
+ READ_ONCE(net->dev_base_seq);
/* Must not return 0 (see nl_dump_check_consistent()).
* Chose a value far away from 0.
@@ -723,17 +727,18 @@ static u32 inet6_base_seq(const struct net *net)
return res;
}
-
static int inet6_netconf_dump_devconf(struct sk_buff *skb,
struct netlink_callback *cb)
{
const struct nlmsghdr *nlh = cb->nlh;
struct net *net = sock_net(skb->sk);
- int h, s_h;
- int idx, s_idx;
+ struct {
+ unsigned long ifindex;
+ unsigned int all_default;
+ } *ctx = (void *)cb->ctx;
struct net_device *dev;
struct inet6_dev *idev;
- struct hlist_head *head;
+ int err = 0;
if (cb->strict_check) {
struct netlink_ext_ack *extack = cb->extack;
@@ -750,64 +755,46 @@ static int inet6_netconf_dump_devconf(struct sk_buff *skb,
}
}
- s_h = cb->args[0];
- s_idx = idx = cb->args[1];
-
- for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
- idx = 0;
- head = &net->dev_index_head[h];
- rcu_read_lock();
- cb->seq = inet6_base_seq(net);
- hlist_for_each_entry_rcu(dev, head, index_hlist) {
- if (idx < s_idx)
- goto cont;
- idev = __in6_dev_get(dev);
- if (!idev)
- goto cont;
-
- if (inet6_netconf_fill_devconf(skb, dev->ifindex,
- &idev->cnf,
- NETLINK_CB(cb->skb).portid,
- nlh->nlmsg_seq,
- RTM_NEWNETCONF,
- NLM_F_MULTI,
- NETCONFA_ALL) < 0) {
- rcu_read_unlock();
- goto done;
- }
- nl_dump_check_consistent(cb, nlmsg_hdr(skb));
-cont:
- idx++;
- }
- rcu_read_unlock();
+ rcu_read_lock();
+ for_each_netdev_dump(net, dev, ctx->ifindex) {
+ idev = __in6_dev_get(dev);
+ if (!idev)
+ continue;
+ err = inet6_netconf_fill_devconf(skb, dev->ifindex,
+ &idev->cnf,
+ NETLINK_CB(cb->skb).portid,
+ nlh->nlmsg_seq,
+ RTM_NEWNETCONF,
+ NLM_F_MULTI,
+ NETCONFA_ALL);
+ if (err < 0)
+ goto done;
}
- if (h == NETDEV_HASHENTRIES) {
- if (inet6_netconf_fill_devconf(skb, NETCONFA_IFINDEX_ALL,
- net->ipv6.devconf_all,
- NETLINK_CB(cb->skb).portid,
- nlh->nlmsg_seq,
- RTM_NEWNETCONF, NLM_F_MULTI,
- NETCONFA_ALL) < 0)
+ if (ctx->all_default == 0) {
+ err = inet6_netconf_fill_devconf(skb, NETCONFA_IFINDEX_ALL,
+ net->ipv6.devconf_all,
+ NETLINK_CB(cb->skb).portid,
+ nlh->nlmsg_seq,
+ RTM_NEWNETCONF, NLM_F_MULTI,
+ NETCONFA_ALL);
+ if (err < 0)
goto done;
- else
- h++;
- }
- if (h == NETDEV_HASHENTRIES + 1) {
- if (inet6_netconf_fill_devconf(skb, NETCONFA_IFINDEX_DEFAULT,
- net->ipv6.devconf_dflt,
- NETLINK_CB(cb->skb).portid,
- nlh->nlmsg_seq,
- RTM_NEWNETCONF, NLM_F_MULTI,
- NETCONFA_ALL) < 0)
+ ctx->all_default++;
+ }
+ if (ctx->all_default == 1) {
+ err = inet6_netconf_fill_devconf(skb, NETCONFA_IFINDEX_DEFAULT,
+ net->ipv6.devconf_dflt,
+ NETLINK_CB(cb->skb).portid,
+ nlh->nlmsg_seq,
+ RTM_NEWNETCONF, NLM_F_MULTI,
+ NETCONFA_ALL);
+ if (err < 0)
goto done;
- else
- h++;
+ ctx->all_default++;
}
done:
- cb->args[0] = h;
- cb->args[1] = idx;
-
- return skb->len;
+ rcu_read_unlock();
+ return err;
}
#ifdef CONFIG_SYSCTL
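inet6_netconf_dump_devconf() above, and inet6_dump_addr() later in this file, move from the old two-level hash-bucket walk tracked in cb->args[] to for_each_netdev_dump(), which resumes from a single ifindex saved in cb->ctx and runs entirely under RCU. The shape of a new-style dump, reduced to its bones (fill_one() is a hypothetical per-device filler):

	struct example_dump_ctx {
		unsigned long ifindex;	/* persists in cb->ctx between calls */
	};

	static int example_dump(struct sk_buff *skb, struct netlink_callback *cb)
	{
		struct example_dump_ctx *ctx = (void *)cb->ctx;
		struct net_device *dev;
		int err = 0;

		rcu_read_lock();
		for_each_netdev_dump(sock_net(skb->sk), dev, ctx->ifindex) {
			err = fill_one(skb, dev);	/* hypothetical */
			if (err < 0)
				break;	/* ctx->ifindex already names @dev,
					 * so the next call resumes here */
		}
		rcu_read_unlock();
		return err;
	}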
@@ -867,7 +854,8 @@ static void addrconf_forward_change(struct net *net, __s32 newf)
idev = __in6_dev_get(dev);
if (idev) {
int changed = (!idev->cnf.forwarding) ^ (!newf);
- idev->cnf.forwarding = newf;
+
+ WRITE_ONCE(idev->cnf.forwarding, newf);
if (changed)
dev_forward_change(idev);
}
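Most of the addrconf churn from here on is one pattern applied many times: ipv6_devconf fields that used to be read under a lock are now read locklessly, so each write gains WRITE_ONCE() and each lockless read gains READ_ONCE(), ruling out load/store tearing and documenting the pairing for KCSAN. In miniature:

	/* hedged sketch; example_conf stands in for any devconf field */
	static int example_conf;

	static void example_writer(int newf)	/* runs with RTNL held */
	{
		WRITE_ONCE(example_conf, newf);	/* one untorn store */
	}

	static bool example_reader(void)	/* lockless fast path */
	{
		return READ_ONCE(example_conf) > 0;	/* one untorn load */
	}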
@@ -884,7 +872,7 @@ static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int newf)
net = (struct net *)table->extra2;
old = *p;
- *p = newf;
+ WRITE_ONCE(*p, newf);
if (p == &net->ipv6.devconf_dflt->forwarding) {
if ((!newf) ^ (!old))
@@ -899,7 +887,7 @@ static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int newf)
if (p == &net->ipv6.devconf_all->forwarding) {
int old_dflt = net->ipv6.devconf_dflt->forwarding;
- net->ipv6.devconf_dflt->forwarding = newf;
+ WRITE_ONCE(net->ipv6.devconf_dflt->forwarding, newf);
if ((!newf) ^ (!old_dflt))
inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
NETCONFA_FORWARDING,
@@ -931,7 +919,7 @@ static void addrconf_linkdown_change(struct net *net, __s32 newf)
if (idev) {
int changed = (!idev->cnf.ignore_routes_with_linkdown) ^ (!newf);
- idev->cnf.ignore_routes_with_linkdown = newf;
+ WRITE_ONCE(idev->cnf.ignore_routes_with_linkdown, newf);
if (changed)
inet6_netconf_notify_devconf(dev_net(dev),
RTM_NEWNETCONF,
@@ -952,7 +940,7 @@ static int addrconf_fixup_linkdown(struct ctl_table *table, int *p, int newf)
net = (struct net *)table->extra2;
old = *p;
- *p = newf;
+ WRITE_ONCE(*p, newf);
if (p == &net->ipv6.devconf_dflt->ignore_routes_with_linkdown) {
if ((!newf) ^ (!old))
@@ -966,7 +954,7 @@ static int addrconf_fixup_linkdown(struct ctl_table *table, int *p, int newf)
}
if (p == &net->ipv6.devconf_all->ignore_routes_with_linkdown) {
- net->ipv6.devconf_dflt->ignore_routes_with_linkdown = newf;
+ WRITE_ONCE(net->ipv6.devconf_dflt->ignore_routes_with_linkdown, newf);
addrconf_linkdown_change(net, newf);
if ((!newf) ^ (!old))
inet6_netconf_notify_devconf(net,
@@ -1270,6 +1258,7 @@ static void
cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long expires,
bool del_rt, bool del_peer)
{
+ struct fib6_table *table;
struct fib6_info *f6i;
f6i = addrconf_get_prefix_route(del_peer ? &ifp->peer_addr : &ifp->addr,
@@ -1279,8 +1268,15 @@ cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long expires,
if (del_rt)
ip6_del_rt(dev_net(ifp->idev->dev), f6i, false);
else {
- if (!(f6i->fib6_flags & RTF_EXPIRES))
+ if (!(f6i->fib6_flags & RTF_EXPIRES)) {
+ table = f6i->fib6_table;
+ spin_lock_bh(&table->tb6_lock);
+
fib6_set_expires(f6i, expires);
+ fib6_add_gc_list(f6i);
+
+ spin_unlock_bh(&table->tb6_lock);
+ }
fib6_info_release(f6i);
}
}
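cleanup_prefix_route() above, and addrconf_prefix_rcv() and modify_prefix_route() below, all grow the same dance: changing a route's expiry now also has to add it to, or remove it from, the table's GC list, and both steps must sit under tb6_lock. The paired pattern, lifted from the hunks:

	spin_lock_bh(&table->tb6_lock);
	if (expires) {
		fib6_set_expires(f6i, expires);
		fib6_add_gc_list(f6i);		/* let GC see this route */
	} else {
		fib6_clean_expires(f6i);
		fib6_remove_gc_list(f6i);	/* no longer a GC candidate */
	}
	spin_unlock_bh(&table->tb6_lock);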
@@ -1346,12 +1342,21 @@ out:
in6_ifa_put(ifp);
}
+static unsigned long ipv6_get_regen_advance(const struct inet6_dev *idev)
+{
+ return READ_ONCE(idev->cnf.regen_min_advance) +
+ READ_ONCE(idev->cnf.regen_max_retry) *
+ READ_ONCE(idev->cnf.dad_transmits) *
+ max(NEIGH_VAR(idev->nd_parms, RETRANS_TIME), HZ/100) / HZ;
+}
+
static int ipv6_create_tempaddr(struct inet6_ifaddr *ifp, bool block)
{
struct inet6_dev *idev = ifp->idev;
unsigned long tmp_tstamp, age;
unsigned long regen_advance;
unsigned long now = jiffies;
+ u32 if_public_preferred_lft;
s32 cnf_temp_preferred_lft;
struct inet6_ifaddr *ift;
struct ifa6_config cfg;
@@ -1363,7 +1368,7 @@ static int ipv6_create_tempaddr(struct inet6_ifaddr *ifp, bool block)
retry:
in6_dev_hold(idev);
- if (idev->cnf.use_tempaddr <= 0) {
+ if (READ_ONCE(idev->cnf.use_tempaddr) <= 0) {
write_unlock_bh(&idev->lock);
pr_info("%s: use_tempaddr is disabled\n", __func__);
in6_dev_put(idev);
@@ -1371,8 +1376,8 @@ retry:
goto out;
}
spin_lock_bh(&ifp->lock);
- if (ifp->regen_count++ >= idev->cnf.regen_max_retry) {
- idev->cnf.use_tempaddr = -1; /*XXX*/
+ if (ifp->regen_count++ >= READ_ONCE(idev->cnf.regen_max_retry)) {
+ WRITE_ONCE(idev->cnf.use_tempaddr, -1); /*XXX*/
spin_unlock_bh(&ifp->lock);
write_unlock_bh(&idev->lock);
pr_warn("%s: regeneration time exceeded - disabled temporary address support\n",
@@ -1387,16 +1392,14 @@ retry:
age = (now - ifp->tstamp) / HZ;
- regen_advance = idev->cnf.regen_max_retry *
- idev->cnf.dad_transmits *
- max(NEIGH_VAR(idev->nd_parms, RETRANS_TIME), HZ/100) / HZ;
+ regen_advance = ipv6_get_regen_advance(idev);
/* recalculate max_desync_factor each time and update
* idev->desync_factor if it's larger
*/
cnf_temp_preferred_lft = READ_ONCE(idev->cnf.temp_prefered_lft);
max_desync_factor = min_t(long,
- idev->cnf.max_desync_factor,
+ READ_ONCE(idev->cnf.max_desync_factor),
cnf_temp_preferred_lft - regen_advance);
if (unlikely(idev->desync_factor > max_desync_factor)) {
@@ -1409,11 +1412,13 @@ retry:
}
}
+ if_public_preferred_lft = ifp->prefered_lft;
+
memset(&cfg, 0, sizeof(cfg));
cfg.valid_lft = min_t(__u32, ifp->valid_lft,
- idev->cnf.temp_valid_lft + age);
+ READ_ONCE(idev->cnf.temp_valid_lft) + age);
cfg.preferred_lft = cnf_temp_preferred_lft + age - idev->desync_factor;
- cfg.preferred_lft = min_t(__u32, ifp->prefered_lft, cfg.preferred_lft);
+ cfg.preferred_lft = min_t(__u32, if_public_preferred_lft, cfg.preferred_lft);
cfg.preferred_lft = min_t(__u32, cfg.valid_lft, cfg.preferred_lft);
cfg.plen = ifp->prefix_len;
@@ -1422,19 +1427,41 @@ retry:
write_unlock_bh(&idev->lock);
- /* A temporary address is created only if this calculated Preferred
- * Lifetime is greater than REGEN_ADVANCE time units. In particular,
- * an implementation must not create a temporary address with a zero
- * Preferred Lifetime.
+ /* From RFC 4941:
+ *
+ * A temporary address is created only if this calculated Preferred
+ * Lifetime is greater than REGEN_ADVANCE time units. In
+ * particular, an implementation must not create a temporary address
+ * with a zero Preferred Lifetime.
+ *
+ * ...
+ *
+ * When creating a temporary address, the lifetime values MUST be
+ * derived from the corresponding prefix as follows:
+ *
+ * ...
+ *
+ * * Its Preferred Lifetime is the lower of the Preferred Lifetime
+ * of the public address or TEMP_PREFERRED_LIFETIME -
+ * DESYNC_FACTOR.
+ *
+ * To comply with the RFC's requirements, clamp the preferred lifetime
+ * to a minimum of regen_advance, unless that would exceed valid_lft or
+ * ifp->prefered_lft.
+ *
* Use age calculation as in addrconf_verify to avoid unnecessary
* temporary addresses being generated.
*/
age = (now - tmp_tstamp + ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
if (cfg.preferred_lft <= regen_advance + age) {
- in6_ifa_put(ifp);
- in6_dev_put(idev);
- ret = -1;
- goto out;
+ cfg.preferred_lft = regen_advance + age + 1;
+ if (cfg.preferred_lft > cfg.valid_lft ||
+ cfg.preferred_lft > if_public_preferred_lft) {
+ in6_ifa_put(ifp);
+ in6_dev_put(idev);
+ ret = -1;
+ goto out;
+ }
}
cfg.ifa_flags = IFA_F_TEMPORARY;
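With ipv6_get_regen_advance() and the clamp above, a temporary address's preferred lifetime can no longer fall below the regeneration lead time. A worked example with stock defaults (regen_max_retry = 3, dad_transmits = 1, RETRANS_TIME = 1s; the default for the new regen_min_advance is assumed here to be RFC 8981's 2s):

	#include <stdio.h>

	int main(void)
	{
		unsigned long regen_min_advance = 2;	/* assumed default, s */
		unsigned long regen_max_retry   = 3;	/* REGEN_MAX_RETRY */
		unsigned long dad_transmits     = 1;	/* kernel default */
		unsigned long retrans_time      = 1;	/* RETRANS_TIME, s */

		/* mirrors ipv6_get_regen_advance() with HZ factored out */
		printf("regen_advance = %lu s\n",	/* prints 5 s */
		       regen_min_advance +
		       regen_max_retry * dad_transmits * retrans_time);
		return 0;
	}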
@@ -1513,15 +1540,17 @@ static inline int ipv6_saddr_preferred(int type)
return 0;
}
-static bool ipv6_use_optimistic_addr(struct net *net,
- struct inet6_dev *idev)
+static bool ipv6_use_optimistic_addr(const struct net *net,
+ const struct inet6_dev *idev)
{
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
if (!idev)
return false;
- if (!net->ipv6.devconf_all->optimistic_dad && !idev->cnf.optimistic_dad)
+ if (!READ_ONCE(net->ipv6.devconf_all->optimistic_dad) &&
+ !READ_ONCE(idev->cnf.optimistic_dad))
return false;
- if (!net->ipv6.devconf_all->use_optimistic && !idev->cnf.use_optimistic)
+ if (!READ_ONCE(net->ipv6.devconf_all->use_optimistic) &&
+ !READ_ONCE(idev->cnf.use_optimistic))
return false;
return true;
@@ -1530,13 +1559,14 @@ static bool ipv6_use_optimistic_addr(struct net *net,
#endif
}
-static bool ipv6_allow_optimistic_dad(struct net *net,
- struct inet6_dev *idev)
+static bool ipv6_allow_optimistic_dad(const struct net *net,
+ const struct inet6_dev *idev)
{
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
if (!idev)
return false;
- if (!net->ipv6.devconf_all->optimistic_dad && !idev->cnf.optimistic_dad)
+ if (!READ_ONCE(net->ipv6.devconf_all->optimistic_dad) &&
+ !READ_ONCE(idev->cnf.optimistic_dad))
return false;
return true;
@@ -1642,7 +1672,7 @@ static int ipv6_get_saddr_eval(struct net *net,
*/
int preftmp = dst->prefs & (IPV6_PREFER_SRC_PUBLIC|IPV6_PREFER_SRC_TMP) ?
!!(dst->prefs & IPV6_PREFER_SRC_TMP) :
- score->ifa->idev->cnf.use_tempaddr >= 2;
+ READ_ONCE(score->ifa->idev->cnf.use_tempaddr) >= 2;
ret = (!(score->ifa->flags & IFA_F_TEMPORARY)) ^ preftmp;
break;
}
@@ -1818,7 +1848,7 @@ int ipv6_dev_get_saddr(struct net *net, const struct net_device *dst_dev,
idev = __in6_dev_get(dst_dev);
if ((dst_type & IPV6_ADDR_MULTICAST) ||
dst.scope <= IPV6_ADDR_SCOPE_LINKLOCAL ||
- (idev && idev->cnf.use_oif_addrs_only)) {
+ (idev && READ_ONCE(idev->cnf.use_oif_addrs_only))) {
use_oif_addr = true;
}
}
@@ -2125,6 +2155,7 @@ void addrconf_dad_failure(struct sk_buff *skb, struct inet6_ifaddr *ifp)
{
struct inet6_dev *idev = ifp->idev;
struct net *net = dev_net(idev->dev);
+ int max_addresses;
if (addrconf_dad_end(ifp)) {
in6_ifa_put(ifp);
@@ -2162,9 +2193,9 @@ void addrconf_dad_failure(struct sk_buff *skb, struct inet6_ifaddr *ifp)
spin_unlock_bh(&ifp->lock);
- if (idev->cnf.max_addresses &&
- ipv6_count_addresses(idev) >=
- idev->cnf.max_addresses)
+ max_addresses = READ_ONCE(idev->cnf.max_addresses);
+ if (max_addresses &&
+ ipv6_count_addresses(idev) >= max_addresses)
goto lock_errdad;
net_info_ratelimited("%s: generating new stable privacy address because of DAD conflict\n",
@@ -2561,11 +2592,11 @@ static void manage_tempaddrs(struct inet6_dev *idev,
* (TEMP_PREFERRED_LIFETIME - DESYNC_FACTOR), respectively.
*/
age = (now - ift->cstamp) / HZ;
- max_valid = idev->cnf.temp_valid_lft - age;
+ max_valid = READ_ONCE(idev->cnf.temp_valid_lft) - age;
if (max_valid < 0)
max_valid = 0;
- max_prefered = idev->cnf.temp_prefered_lft -
+ max_prefered = READ_ONCE(idev->cnf.temp_prefered_lft) -
idev->desync_factor - age;
if (max_prefered < 0)
max_prefered = 0;
@@ -2598,7 +2629,7 @@ static void manage_tempaddrs(struct inet6_dev *idev,
if (list_empty(&idev->tempaddr_list) && (valid_lft || prefered_lft))
create = true;
- if (create && idev->cnf.use_tempaddr > 0) {
+ if (create && READ_ONCE(idev->cnf.use_tempaddr) > 0) {
/* When a new public address is created as described
* in [ADDRCONF], also create a new temporary address.
*/
@@ -2626,7 +2657,7 @@ int addrconf_prefix_rcv_add_addr(struct net *net, struct net_device *dev,
int create = 0, update_lft = 0;
if (!ifp && valid_lft) {
- int max_addresses = in6_dev->cnf.max_addresses;
+ int max_addresses = READ_ONCE(in6_dev->cnf.max_addresses);
struct ifa6_config cfg = {
.pfx = addr,
.plen = pinfo->prefix_len,
@@ -2638,8 +2669,8 @@ int addrconf_prefix_rcv_add_addr(struct net *net, struct net_device *dev,
};
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
- if ((net->ipv6.devconf_all->optimistic_dad ||
- in6_dev->cnf.optimistic_dad) &&
+ if ((READ_ONCE(net->ipv6.devconf_all->optimistic_dad) ||
+ READ_ONCE(in6_dev->cnf.optimistic_dad)) &&
!net->ipv6.devconf_all->forwarding && sllao)
cfg.ifa_flags |= IFA_F_OPTIMISTIC;
#endif
@@ -2688,7 +2719,7 @@ int addrconf_prefix_rcv_add_addr(struct net *net, struct net_device *dev,
*/
update_lft = !create && stored_lft;
- if (update_lft && !in6_dev->cnf.ra_honor_pio_life) {
+ if (update_lft && !READ_ONCE(in6_dev->cnf.ra_honor_pio_life)) {
const u32 minimum_lft = min_t(u32,
stored_lft, MIN_VALID_LIFETIME);
valid_lft = max(valid_lft, minimum_lft);
@@ -2697,7 +2728,7 @@ int addrconf_prefix_rcv_add_addr(struct net *net, struct net_device *dev,
if (update_lft) {
ifp->valid_lft = valid_lft;
ifp->prefered_lft = prefered_lft;
- ifp->tstamp = now;
+ WRITE_ONCE(ifp->tstamp, now);
flags = ifp->flags;
ifp->flags &= ~IFA_F_DEPRECATED;
spin_unlock_bh(&ifp->lock);
@@ -2721,6 +2752,7 @@ EXPORT_SYMBOL_GPL(addrconf_prefix_rcv_add_addr);
void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao)
{
struct prefix_info *pinfo;
+ struct fib6_table *table;
__u32 valid_lft;
__u32 prefered_lft;
int addr_type, err;
@@ -2797,11 +2829,20 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao)
if (valid_lft == 0) {
ip6_del_rt(net, rt, false);
rt = NULL;
- } else if (addrconf_finite_timeout(rt_expires)) {
- /* not infinity */
- fib6_set_expires(rt, jiffies + rt_expires);
} else {
- fib6_clean_expires(rt);
+ table = rt->fib6_table;
+ spin_lock_bh(&table->tb6_lock);
+
+ if (addrconf_finite_timeout(rt_expires)) {
+ /* not infinity */
+ fib6_set_expires(rt, jiffies + rt_expires);
+ fib6_add_gc_list(rt);
+ } else {
+ fib6_clean_expires(rt);
+ fib6_remove_gc_list(rt);
+ }
+
+ spin_unlock_bh(&table->tb6_lock);
}
} else if (valid_lft) {
clock_t expires = 0;
@@ -3262,8 +3303,8 @@ void addrconf_add_linklocal(struct inet6_dev *idev,
struct inet6_ifaddr *ifp;
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
- if ((dev_net(idev->dev)->ipv6.devconf_all->optimistic_dad ||
- idev->cnf.optimistic_dad) &&
+ if ((READ_ONCE(dev_net(idev->dev)->ipv6.devconf_all->optimistic_dad) ||
+ READ_ONCE(idev->cnf.optimistic_dad)) &&
!dev_net(idev->dev)->ipv6.devconf_all->forwarding)
cfg.ifa_flags |= IFA_F_OPTIMISTIC;
#endif
@@ -3442,7 +3483,8 @@ static void addrconf_dev_config(struct net_device *dev)
/* this device type has no EUI support */
if (dev->type == ARPHRD_NONE &&
idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_EUI64)
- idev->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_RANDOM;
+ WRITE_ONCE(idev->cnf.addr_gen_mode,
+ IN6_ADDR_GEN_MODE_RANDOM);
addrconf_addr_gen(idev, false);
}
@@ -3620,7 +3662,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
if (idev) {
rt6_mtu_change(dev, dev->mtu);
- idev->cnf.mtu6 = dev->mtu;
+ WRITE_ONCE(idev->cnf.mtu6, dev->mtu);
break;
}
@@ -3712,9 +3754,9 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
if (idev->cnf.mtu6 != dev->mtu &&
dev->mtu >= IPV6_MIN_MTU) {
rt6_mtu_change(dev, dev->mtu);
- idev->cnf.mtu6 = dev->mtu;
+ WRITE_ONCE(idev->cnf.mtu6, dev->mtu);
}
- idev->tstamp = jiffies;
+ WRITE_ONCE(idev->tstamp, jiffies);
inet6_ifinfo_notify(RTM_NEWLINK, idev);
/*
@@ -3834,10 +3876,10 @@ static int addrconf_ifdown(struct net_device *dev, bool unregister)
*/
if (!unregister && !idev->cnf.disable_ipv6) {
/* aggregate the system setting and interface setting */
- int _keep_addr = net->ipv6.devconf_all->keep_addr_on_down;
+ int _keep_addr = READ_ONCE(net->ipv6.devconf_all->keep_addr_on_down);
if (!_keep_addr)
- _keep_addr = idev->cnf.keep_addr_on_down;
+ _keep_addr = READ_ONCE(idev->cnf.keep_addr_on_down);
keep_addr = (_keep_addr > 0);
}
@@ -3956,7 +3998,7 @@ restart:
ipv6_mc_down(idev);
}
- idev->tstamp = jiffies;
+ WRITE_ONCE(idev->tstamp, jiffies);
idev->ra_mtu = 0;
/* Last: Shot the device (if unregistered) */
@@ -3974,6 +4016,7 @@ static void addrconf_rs_timer(struct timer_list *t)
struct inet6_dev *idev = from_timer(idev, t, rs_timer);
struct net_device *dev = idev->dev;
struct in6_addr lladdr;
+ int rtr_solicits;
write_lock(&idev->lock);
if (idev->dead || !(idev->if_flags & IF_READY))
@@ -3986,7 +4029,9 @@ static void addrconf_rs_timer(struct timer_list *t)
if (idev->if_flags & IF_RA_RCVD)
goto out;
- if (idev->rs_probes++ < idev->cnf.rtr_solicits || idev->cnf.rtr_solicits < 0) {
+ rtr_solicits = READ_ONCE(idev->cnf.rtr_solicits);
+
+ if (idev->rs_probes++ < rtr_solicits || rtr_solicits < 0) {
write_unlock(&idev->lock);
if (!ipv6_get_lladdr(dev, &lladdr, IFA_F_TENTATIVE))
ndisc_send_rs(dev, &lladdr,
@@ -3996,11 +4041,12 @@ static void addrconf_rs_timer(struct timer_list *t)
write_lock(&idev->lock);
idev->rs_interval = rfc3315_s14_backoff_update(
- idev->rs_interval, idev->cnf.rtr_solicit_max_interval);
+ idev->rs_interval,
+ READ_ONCE(idev->cnf.rtr_solicit_max_interval));
/* The wait after the last probe can be shorter */
addrconf_mod_rs_timer(idev, (idev->rs_probes ==
- idev->cnf.rtr_solicits) ?
- idev->cnf.rtr_solicit_delay :
+ READ_ONCE(idev->cnf.rtr_solicits)) ?
+ READ_ONCE(idev->cnf.rtr_solicit_delay) :
idev->rs_interval);
} else {
/*
@@ -4021,24 +4067,25 @@ put:
*/
static void addrconf_dad_kick(struct inet6_ifaddr *ifp)
{
- unsigned long rand_num;
struct inet6_dev *idev = ifp->idev;
+ unsigned long rand_num;
u64 nonce;
if (ifp->flags & IFA_F_OPTIMISTIC)
rand_num = 0;
else
- rand_num = get_random_u32_below(idev->cnf.rtr_solicit_delay ? : 1);
+ rand_num = get_random_u32_below(
+ READ_ONCE(idev->cnf.rtr_solicit_delay) ? : 1);
nonce = 0;
- if (idev->cnf.enhanced_dad ||
- dev_net(idev->dev)->ipv6.devconf_all->enhanced_dad) {
+ if (READ_ONCE(idev->cnf.enhanced_dad) ||
+ READ_ONCE(dev_net(idev->dev)->ipv6.devconf_all->enhanced_dad)) {
do
get_random_bytes(&nonce, 6);
while (nonce == 0);
}
ifp->dad_nonce = nonce;
- ifp->dad_probes = idev->cnf.dad_transmits;
+ ifp->dad_probes = READ_ONCE(idev->cnf.dad_transmits);
addrconf_mod_dad_work(ifp, rand_num);
}
@@ -4058,8 +4105,8 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
net = dev_net(dev);
if (dev->flags&(IFF_NOARP|IFF_LOOPBACK) ||
- (net->ipv6.devconf_all->accept_dad < 1 &&
- idev->cnf.accept_dad < 1) ||
+ (READ_ONCE(net->ipv6.devconf_all->accept_dad) < 1 &&
+ READ_ONCE(idev->cnf.accept_dad) < 1) ||
!(ifp->flags&IFA_F_TENTATIVE) ||
ifp->flags & IFA_F_NODAD) {
bool send_na = false;
@@ -4151,8 +4198,8 @@ static void addrconf_dad_work(struct work_struct *w)
action = DAD_ABORT;
ifp->state = INET6_IFADDR_STATE_POSTDAD;
- if ((dev_net(idev->dev)->ipv6.devconf_all->accept_dad > 1 ||
- idev->cnf.accept_dad > 1) &&
+ if ((READ_ONCE(dev_net(idev->dev)->ipv6.devconf_all->accept_dad) > 1 ||
+ READ_ONCE(idev->cnf.accept_dad) > 1) &&
!idev->cnf.disable_ipv6 &&
!(ifp->flags & IFA_F_STABLE_PRIVACY)) {
struct in6_addr addr;
@@ -4163,7 +4210,7 @@ static void addrconf_dad_work(struct work_struct *w)
if (!ipv6_generate_eui64(addr.s6_addr + 8, idev->dev) &&
ipv6_addr_equal(&ifp->addr, &addr)) {
/* DAD failed for link-local based on MAC */
- idev->cnf.disable_ipv6 = 1;
+ WRITE_ONCE(idev->cnf.disable_ipv6, 1);
pr_info("%s: IPv6 being disabled!\n",
ifp->idev->dev->name);
@@ -4277,7 +4324,7 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id,
send_mld = ifp->scope == IFA_LINK && ipv6_lonely_lladdr(ifp);
send_rs = send_mld &&
ipv6_accept_ra(ifp->idev) &&
- ifp->idev->cnf.rtr_solicits != 0 &&
+ READ_ONCE(ifp->idev->cnf.rtr_solicits) != 0 &&
(dev->flags & IFF_LOOPBACK) == 0 &&
(dev->type != ARPHRD_TUNNEL) &&
!netif_is_team_port(dev);
@@ -4291,8 +4338,8 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id,
/* send unsolicited NA if enabled */
if (send_na &&
- (ifp->idev->cnf.ndisc_notify ||
- dev_net(dev)->ipv6.devconf_all->ndisc_notify)) {
+ (READ_ONCE(ifp->idev->cnf.ndisc_notify) ||
+ READ_ONCE(dev_net(dev)->ipv6.devconf_all->ndisc_notify))) {
ndisc_send_na(dev, &in6addr_linklocal_allnodes, &ifp->addr,
/*router=*/ !!ifp->idev->cnf.forwarding,
/*solicited=*/ false, /*override=*/ true,
@@ -4312,7 +4359,7 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id,
write_lock_bh(&ifp->idev->lock);
spin_lock(&ifp->lock);
ifp->idev->rs_interval = rfc3315_s14_backoff_init(
- ifp->idev->cnf.rtr_solicit_interval);
+ READ_ONCE(ifp->idev->cnf.rtr_solicit_interval));
ifp->idev->rs_probes = 1;
ifp->idev->if_flags |= IF_RS_SENT;
addrconf_mod_rs_timer(ifp->idev, ifp->idev->rs_interval);
@@ -4592,9 +4639,7 @@ restart:
!ifp->regen_count && ifp->ifpub) {
/* This is a non-regenerated temporary addr. */
- unsigned long regen_advance = ifp->idev->cnf.regen_max_retry *
- ifp->idev->cnf.dad_transmits *
- max(NEIGH_VAR(ifp->idev->nd_parms, RETRANS_TIME), HZ/100) / HZ;
+ unsigned long regen_advance = ipv6_get_regen_advance(ifp->idev);
if (age + regen_advance >= ifp->prefered_lft) {
struct inet6_ifaddr *ifpub = ifp->ifpub;
@@ -4756,6 +4801,7 @@ static int modify_prefix_route(struct inet6_ifaddr *ifp,
unsigned long expires, u32 flags,
bool modify_peer)
{
+ struct fib6_table *table;
struct fib6_info *f6i;
u32 prio;
@@ -4776,10 +4822,18 @@ static int modify_prefix_route(struct inet6_ifaddr *ifp,
ifp->rt_priority, ifp->idev->dev,
expires, flags, GFP_KERNEL);
} else {
- if (!expires)
+ table = f6i->fib6_table;
+ spin_lock_bh(&table->tb6_lock);
+
+ if (!(flags & RTF_EXPIRES)) {
fib6_clean_expires(f6i);
- else
+ fib6_remove_gc_list(f6i);
+ } else {
fib6_set_expires(f6i, expires);
+ fib6_add_gc_list(f6i);
+ }
+
+ spin_unlock_bh(&table->tb6_lock);
fib6_info_release(f6i);
}
@@ -4842,13 +4896,13 @@ static int inet6_addr_modify(struct net *net, struct inet6_ifaddr *ifp,
IFA_F_HOMEADDRESS | IFA_F_MANAGETEMPADDR |
IFA_F_NOPREFIXROUTE);
ifp->flags |= cfg->ifa_flags;
- ifp->tstamp = jiffies;
- ifp->valid_lft = cfg->valid_lft;
- ifp->prefered_lft = cfg->preferred_lft;
- ifp->ifa_proto = cfg->ifa_proto;
+ WRITE_ONCE(ifp->tstamp, jiffies);
+ WRITE_ONCE(ifp->valid_lft, cfg->valid_lft);
+ WRITE_ONCE(ifp->prefered_lft, cfg->preferred_lft);
+ WRITE_ONCE(ifp->ifa_proto, cfg->ifa_proto);
if (cfg->rt_priority && cfg->rt_priority != ifp->rt_priority)
- ifp->rt_priority = cfg->rt_priority;
+ WRITE_ONCE(ifp->rt_priority, cfg->rt_priority);
if (new_peer)
ifp->peer_addr = *cfg->peer_pfx;
@@ -5069,17 +5123,21 @@ struct inet6_fill_args {
enum addr_type_t type;
};
-static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa,
+static int inet6_fill_ifaddr(struct sk_buff *skb,
+ const struct inet6_ifaddr *ifa,
struct inet6_fill_args *args)
{
- struct nlmsghdr *nlh;
+ struct nlmsghdr *nlh;
u32 preferred, valid;
+ u32 flags, priority;
+ u8 proto;
nlh = nlmsg_put(skb, args->portid, args->seq, args->event,
sizeof(struct ifaddrmsg), args->flags);
if (!nlh)
return -EMSGSIZE;
+ flags = READ_ONCE(ifa->flags);
put_ifaddrmsg(nlh, ifa->prefix_len, ifa->flags, rt_scope(ifa->scope),
ifa->idev->dev->ifindex);
@@ -5087,13 +5145,14 @@ static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa,
nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid))
goto error;
- spin_lock_bh(&ifa->lock);
- if (!((ifa->flags&IFA_F_PERMANENT) &&
- (ifa->prefered_lft == INFINITY_LIFE_TIME))) {
- preferred = ifa->prefered_lft;
- valid = ifa->valid_lft;
+ preferred = READ_ONCE(ifa->prefered_lft);
+ valid = READ_ONCE(ifa->valid_lft);
+
+ if (!((flags & IFA_F_PERMANENT) &&
+ (preferred == INFINITY_LIFE_TIME))) {
if (preferred != INFINITY_LIFE_TIME) {
- long tval = (jiffies - ifa->tstamp)/HZ;
+ long tval = (jiffies - READ_ONCE(ifa->tstamp)) / HZ;
+
if (preferred > tval)
preferred -= tval;
else
@@ -5109,28 +5168,29 @@ static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa,
preferred = INFINITY_LIFE_TIME;
valid = INFINITY_LIFE_TIME;
}
- spin_unlock_bh(&ifa->lock);
if (!ipv6_addr_any(&ifa->peer_addr)) {
if (nla_put_in6_addr(skb, IFA_LOCAL, &ifa->addr) < 0 ||
nla_put_in6_addr(skb, IFA_ADDRESS, &ifa->peer_addr) < 0)
goto error;
- } else
+ } else {
if (nla_put_in6_addr(skb, IFA_ADDRESS, &ifa->addr) < 0)
goto error;
+ }
- if (ifa->rt_priority &&
- nla_put_u32(skb, IFA_RT_PRIORITY, ifa->rt_priority))
+ priority = READ_ONCE(ifa->rt_priority);
+ if (priority && nla_put_u32(skb, IFA_RT_PRIORITY, priority))
goto error;
- if (put_cacheinfo(skb, ifa->cstamp, ifa->tstamp, preferred, valid) < 0)
+ if (put_cacheinfo(skb, ifa->cstamp, READ_ONCE(ifa->tstamp),
+ preferred, valid) < 0)
goto error;
- if (nla_put_u32(skb, IFA_FLAGS, ifa->flags) < 0)
+ if (nla_put_u32(skb, IFA_FLAGS, flags) < 0)
goto error;
- if (ifa->ifa_proto &&
- nla_put_u8(skb, IFA_PROTO, ifa->ifa_proto))
+ proto = READ_ONCE(ifa->ifa_proto);
+ if (proto && nla_put_u8(skb, IFA_PROTO, proto))
goto error;
nlmsg_end(skb, nlh);
@@ -5141,12 +5201,13 @@ error:
return -EMSGSIZE;
}
-static int inet6_fill_ifmcaddr(struct sk_buff *skb, struct ifmcaddr6 *ifmca,
+static int inet6_fill_ifmcaddr(struct sk_buff *skb,
+ const struct ifmcaddr6 *ifmca,
struct inet6_fill_args *args)
{
- struct nlmsghdr *nlh;
- u8 scope = RT_SCOPE_UNIVERSE;
int ifindex = ifmca->idev->dev->ifindex;
+ u8 scope = RT_SCOPE_UNIVERSE;
+ struct nlmsghdr *nlh;
if (ipv6_addr_scope(&ifmca->mca_addr) & IFA_SITE)
scope = RT_SCOPE_SITE;
@@ -5164,7 +5225,7 @@ static int inet6_fill_ifmcaddr(struct sk_buff *skb, struct ifmcaddr6 *ifmca,
put_ifaddrmsg(nlh, 128, IFA_F_PERMANENT, scope, ifindex);
if (nla_put_in6_addr(skb, IFA_MULTICAST, &ifmca->mca_addr) < 0 ||
- put_cacheinfo(skb, ifmca->mca_cstamp, ifmca->mca_tstamp,
+ put_cacheinfo(skb, ifmca->mca_cstamp, READ_ONCE(ifmca->mca_tstamp),
INFINITY_LIFE_TIME, INFINITY_LIFE_TIME) < 0) {
nlmsg_cancel(skb, nlh);
return -EMSGSIZE;
@@ -5174,13 +5235,14 @@ static int inet6_fill_ifmcaddr(struct sk_buff *skb, struct ifmcaddr6 *ifmca,
return 0;
}
-static int inet6_fill_ifacaddr(struct sk_buff *skb, struct ifacaddr6 *ifaca,
+static int inet6_fill_ifacaddr(struct sk_buff *skb,
+ const struct ifacaddr6 *ifaca,
struct inet6_fill_args *args)
{
struct net_device *dev = fib6_info_nh_dev(ifaca->aca_rt);
int ifindex = dev ? dev->ifindex : 1;
- struct nlmsghdr *nlh;
u8 scope = RT_SCOPE_UNIVERSE;
+ struct nlmsghdr *nlh;
if (ipv6_addr_scope(&ifaca->aca_addr) & IFA_SITE)
scope = RT_SCOPE_SITE;
@@ -5198,7 +5260,7 @@ static int inet6_fill_ifacaddr(struct sk_buff *skb, struct ifacaddr6 *ifaca,
put_ifaddrmsg(nlh, 128, IFA_F_PERMANENT, scope, ifindex);
if (nla_put_in6_addr(skb, IFA_ANYCAST, &ifaca->aca_addr) < 0 ||
- put_cacheinfo(skb, ifaca->aca_cstamp, ifaca->aca_tstamp,
+ put_cacheinfo(skb, ifaca->aca_cstamp, READ_ONCE(ifaca->aca_tstamp),
INFINITY_LIFE_TIME, INFINITY_LIFE_TIME) < 0) {
nlmsg_cancel(skb, nlh);
return -EMSGSIZE;
@@ -5209,24 +5271,23 @@ static int inet6_fill_ifacaddr(struct sk_buff *skb, struct ifacaddr6 *ifaca,
}
/* called with rcu_read_lock() */
-static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb,
- struct netlink_callback *cb, int s_ip_idx,
+static int in6_dump_addrs(const struct inet6_dev *idev, struct sk_buff *skb,
+ struct netlink_callback *cb, int *s_ip_idx,
struct inet6_fill_args *fillargs)
{
- struct ifmcaddr6 *ifmca;
- struct ifacaddr6 *ifaca;
+ const struct ifmcaddr6 *ifmca;
+ const struct ifacaddr6 *ifaca;
int ip_idx = 0;
- int err = 1;
+ int err = 0;
- read_lock_bh(&idev->lock);
switch (fillargs->type) {
case UNICAST_ADDR: {
- struct inet6_ifaddr *ifa;
+ const struct inet6_ifaddr *ifa;
fillargs->event = RTM_NEWADDR;
/* unicast address incl. temp addr */
- list_for_each_entry(ifa, &idev->addr_list, if_list) {
- if (ip_idx < s_ip_idx)
+ list_for_each_entry_rcu(ifa, &idev->addr_list, if_list) {
+ if (ip_idx < *s_ip_idx)
goto next;
err = inet6_fill_ifaddr(skb, ifa, fillargs);
if (err < 0)
@@ -5238,27 +5299,25 @@ next:
break;
}
case MULTICAST_ADDR:
- read_unlock_bh(&idev->lock);
fillargs->event = RTM_GETMULTICAST;
/* multicast address */
- for (ifmca = rtnl_dereference(idev->mc_list);
+ for (ifmca = rcu_dereference(idev->mc_list);
ifmca;
- ifmca = rtnl_dereference(ifmca->next), ip_idx++) {
- if (ip_idx < s_ip_idx)
+ ifmca = rcu_dereference(ifmca->next), ip_idx++) {
+ if (ip_idx < *s_ip_idx)
continue;
err = inet6_fill_ifmcaddr(skb, ifmca, fillargs);
if (err < 0)
break;
}
- read_lock_bh(&idev->lock);
break;
case ANYCAST_ADDR:
fillargs->event = RTM_GETANYCAST;
/* anycast address */
- for (ifaca = idev->ac_list; ifaca;
- ifaca = ifaca->aca_next, ip_idx++) {
- if (ip_idx < s_ip_idx)
+ for (ifaca = rcu_dereference(idev->ac_list); ifaca;
+ ifaca = rcu_dereference(ifaca->aca_next), ip_idx++) {
+ if (ip_idx < *s_ip_idx)
continue;
err = inet6_fill_ifacaddr(skb, ifaca, fillargs);
if (err < 0)
@@ -5268,8 +5327,7 @@ next:
default:
break;
}
- read_unlock_bh(&idev->lock);
- cb->args[2] = ip_idx;
+ *s_ip_idx = err ? ip_idx : 0;
return err;
}
@@ -5332,6 +5390,7 @@ static int inet6_valid_dump_ifaddr_req(const struct nlmsghdr *nlh,
static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
enum addr_type_t type)
{
+ struct net *tgt_net = sock_net(skb->sk);
const struct nlmsghdr *nlh = cb->nlh;
struct inet6_fill_args fillargs = {
.portid = NETLINK_CB(cb->skb).portid,
@@ -5340,72 +5399,52 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
.netnsid = -1,
.type = type,
};
- struct net *tgt_net = sock_net(skb->sk);
- int idx, s_idx, s_ip_idx;
- int h, s_h;
+ struct {
+ unsigned long ifindex;
+ int ip_idx;
+ } *ctx = (void *)cb->ctx;
struct net_device *dev;
struct inet6_dev *idev;
- struct hlist_head *head;
int err = 0;
- s_h = cb->args[0];
- s_idx = idx = cb->args[1];
- s_ip_idx = cb->args[2];
-
+ rcu_read_lock();
if (cb->strict_check) {
err = inet6_valid_dump_ifaddr_req(nlh, &fillargs, &tgt_net,
skb->sk, cb);
if (err < 0)
- goto put_tgt_net;
+ goto done;
err = 0;
if (fillargs.ifindex) {
- dev = __dev_get_by_index(tgt_net, fillargs.ifindex);
- if (!dev) {
- err = -ENODEV;
- goto put_tgt_net;
- }
+ err = -ENODEV;
+ dev = dev_get_by_index_rcu(tgt_net, fillargs.ifindex);
+ if (!dev)
+ goto done;
idev = __in6_dev_get(dev);
- if (idev) {
- err = in6_dump_addrs(idev, skb, cb, s_ip_idx,
+ if (idev)
+ err = in6_dump_addrs(idev, skb, cb,
+ &ctx->ip_idx,
&fillargs);
- if (err > 0)
- err = 0;
- }
- goto put_tgt_net;
+ goto done;
}
}
- rcu_read_lock();
cb->seq = inet6_base_seq(tgt_net);
- for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
- idx = 0;
- head = &tgt_net->dev_index_head[h];
- hlist_for_each_entry_rcu(dev, head, index_hlist) {
- if (idx < s_idx)
- goto cont;
- if (h > s_h || idx > s_idx)
- s_ip_idx = 0;
- idev = __in6_dev_get(dev);
- if (!idev)
- goto cont;
-
- if (in6_dump_addrs(idev, skb, cb, s_ip_idx,
- &fillargs) < 0)
- goto done;
-cont:
- idx++;
- }
+ for_each_netdev_dump(tgt_net, dev, ctx->ifindex) {
+ idev = __in6_dev_get(dev);
+ if (!idev)
+ continue;
+ err = in6_dump_addrs(idev, skb, cb, &ctx->ip_idx,
+ &fillargs);
+ if (err < 0)
+ goto done;
}
done:
rcu_read_unlock();
- cb->args[0] = h;
- cb->args[1] = idx;
-put_tgt_net:
if (fillargs.netnsid >= 0)
put_net(tgt_net);
- return skb->len ? : err;
+ return err;
}
static int inet6_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
@@ -5578,87 +5617,97 @@ errout:
rtnl_set_sk_err(net, RTNLGRP_IPV6_IFADDR, err);
}
-static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
- __s32 *array, int bytes)
+static void ipv6_store_devconf(const struct ipv6_devconf *cnf,
+ __s32 *array, int bytes)
{
BUG_ON(bytes < (DEVCONF_MAX * 4));
memset(array, 0, bytes);
- array[DEVCONF_FORWARDING] = cnf->forwarding;
- array[DEVCONF_HOPLIMIT] = cnf->hop_limit;
- array[DEVCONF_MTU6] = cnf->mtu6;
- array[DEVCONF_ACCEPT_RA] = cnf->accept_ra;
- array[DEVCONF_ACCEPT_REDIRECTS] = cnf->accept_redirects;
- array[DEVCONF_AUTOCONF] = cnf->autoconf;
- array[DEVCONF_DAD_TRANSMITS] = cnf->dad_transmits;
- array[DEVCONF_RTR_SOLICITS] = cnf->rtr_solicits;
+ array[DEVCONF_FORWARDING] = READ_ONCE(cnf->forwarding);
+ array[DEVCONF_HOPLIMIT] = READ_ONCE(cnf->hop_limit);
+ array[DEVCONF_MTU6] = READ_ONCE(cnf->mtu6);
+ array[DEVCONF_ACCEPT_RA] = READ_ONCE(cnf->accept_ra);
+ array[DEVCONF_ACCEPT_REDIRECTS] = READ_ONCE(cnf->accept_redirects);
+ array[DEVCONF_AUTOCONF] = READ_ONCE(cnf->autoconf);
+ array[DEVCONF_DAD_TRANSMITS] = READ_ONCE(cnf->dad_transmits);
+ array[DEVCONF_RTR_SOLICITS] = READ_ONCE(cnf->rtr_solicits);
array[DEVCONF_RTR_SOLICIT_INTERVAL] =
- jiffies_to_msecs(cnf->rtr_solicit_interval);
+ jiffies_to_msecs(READ_ONCE(cnf->rtr_solicit_interval));
array[DEVCONF_RTR_SOLICIT_MAX_INTERVAL] =
- jiffies_to_msecs(cnf->rtr_solicit_max_interval);
+ jiffies_to_msecs(READ_ONCE(cnf->rtr_solicit_max_interval));
array[DEVCONF_RTR_SOLICIT_DELAY] =
- jiffies_to_msecs(cnf->rtr_solicit_delay);
- array[DEVCONF_FORCE_MLD_VERSION] = cnf->force_mld_version;
+ jiffies_to_msecs(READ_ONCE(cnf->rtr_solicit_delay));
+ array[DEVCONF_FORCE_MLD_VERSION] = READ_ONCE(cnf->force_mld_version);
array[DEVCONF_MLDV1_UNSOLICITED_REPORT_INTERVAL] =
- jiffies_to_msecs(cnf->mldv1_unsolicited_report_interval);
+ jiffies_to_msecs(READ_ONCE(cnf->mldv1_unsolicited_report_interval));
array[DEVCONF_MLDV2_UNSOLICITED_REPORT_INTERVAL] =
- jiffies_to_msecs(cnf->mldv2_unsolicited_report_interval);
- array[DEVCONF_USE_TEMPADDR] = cnf->use_tempaddr;
- array[DEVCONF_TEMP_VALID_LFT] = cnf->temp_valid_lft;
- array[DEVCONF_TEMP_PREFERED_LFT] = cnf->temp_prefered_lft;
- array[DEVCONF_REGEN_MAX_RETRY] = cnf->regen_max_retry;
- array[DEVCONF_MAX_DESYNC_FACTOR] = cnf->max_desync_factor;
- array[DEVCONF_MAX_ADDRESSES] = cnf->max_addresses;
- array[DEVCONF_ACCEPT_RA_DEFRTR] = cnf->accept_ra_defrtr;
- array[DEVCONF_RA_DEFRTR_METRIC] = cnf->ra_defrtr_metric;
- array[DEVCONF_ACCEPT_RA_MIN_HOP_LIMIT] = cnf->accept_ra_min_hop_limit;
- array[DEVCONF_ACCEPT_RA_PINFO] = cnf->accept_ra_pinfo;
+ jiffies_to_msecs(READ_ONCE(cnf->mldv2_unsolicited_report_interval));
+ array[DEVCONF_USE_TEMPADDR] = READ_ONCE(cnf->use_tempaddr);
+ array[DEVCONF_TEMP_VALID_LFT] = READ_ONCE(cnf->temp_valid_lft);
+ array[DEVCONF_TEMP_PREFERED_LFT] = READ_ONCE(cnf->temp_prefered_lft);
+ array[DEVCONF_REGEN_MAX_RETRY] = READ_ONCE(cnf->regen_max_retry);
+ array[DEVCONF_MAX_DESYNC_FACTOR] = READ_ONCE(cnf->max_desync_factor);
+ array[DEVCONF_MAX_ADDRESSES] = READ_ONCE(cnf->max_addresses);
+ array[DEVCONF_ACCEPT_RA_DEFRTR] = READ_ONCE(cnf->accept_ra_defrtr);
+ array[DEVCONF_RA_DEFRTR_METRIC] = READ_ONCE(cnf->ra_defrtr_metric);
+ array[DEVCONF_ACCEPT_RA_MIN_HOP_LIMIT] =
+ READ_ONCE(cnf->accept_ra_min_hop_limit);
+ array[DEVCONF_ACCEPT_RA_PINFO] = READ_ONCE(cnf->accept_ra_pinfo);
#ifdef CONFIG_IPV6_ROUTER_PREF
- array[DEVCONF_ACCEPT_RA_RTR_PREF] = cnf->accept_ra_rtr_pref;
+ array[DEVCONF_ACCEPT_RA_RTR_PREF] = READ_ONCE(cnf->accept_ra_rtr_pref);
array[DEVCONF_RTR_PROBE_INTERVAL] =
- jiffies_to_msecs(cnf->rtr_probe_interval);
+ jiffies_to_msecs(READ_ONCE(cnf->rtr_probe_interval));
#ifdef CONFIG_IPV6_ROUTE_INFO
- array[DEVCONF_ACCEPT_RA_RT_INFO_MIN_PLEN] = cnf->accept_ra_rt_info_min_plen;
- array[DEVCONF_ACCEPT_RA_RT_INFO_MAX_PLEN] = cnf->accept_ra_rt_info_max_plen;
+ array[DEVCONF_ACCEPT_RA_RT_INFO_MIN_PLEN] =
+ READ_ONCE(cnf->accept_ra_rt_info_min_plen);
+ array[DEVCONF_ACCEPT_RA_RT_INFO_MAX_PLEN] =
+ READ_ONCE(cnf->accept_ra_rt_info_max_plen);
#endif
#endif
- array[DEVCONF_PROXY_NDP] = cnf->proxy_ndp;
- array[DEVCONF_ACCEPT_SOURCE_ROUTE] = cnf->accept_source_route;
+ array[DEVCONF_PROXY_NDP] = READ_ONCE(cnf->proxy_ndp);
+ array[DEVCONF_ACCEPT_SOURCE_ROUTE] =
+ READ_ONCE(cnf->accept_source_route);
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
- array[DEVCONF_OPTIMISTIC_DAD] = cnf->optimistic_dad;
- array[DEVCONF_USE_OPTIMISTIC] = cnf->use_optimistic;
+ array[DEVCONF_OPTIMISTIC_DAD] = READ_ONCE(cnf->optimistic_dad);
+ array[DEVCONF_USE_OPTIMISTIC] = READ_ONCE(cnf->use_optimistic);
#endif
#ifdef CONFIG_IPV6_MROUTE
array[DEVCONF_MC_FORWARDING] = atomic_read(&cnf->mc_forwarding);
#endif
- array[DEVCONF_DISABLE_IPV6] = cnf->disable_ipv6;
- array[DEVCONF_ACCEPT_DAD] = cnf->accept_dad;
- array[DEVCONF_FORCE_TLLAO] = cnf->force_tllao;
- array[DEVCONF_NDISC_NOTIFY] = cnf->ndisc_notify;
- array[DEVCONF_SUPPRESS_FRAG_NDISC] = cnf->suppress_frag_ndisc;
- array[DEVCONF_ACCEPT_RA_FROM_LOCAL] = cnf->accept_ra_from_local;
- array[DEVCONF_ACCEPT_RA_MTU] = cnf->accept_ra_mtu;
- array[DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN] = cnf->ignore_routes_with_linkdown;
+ array[DEVCONF_DISABLE_IPV6] = READ_ONCE(cnf->disable_ipv6);
+ array[DEVCONF_ACCEPT_DAD] = READ_ONCE(cnf->accept_dad);
+ array[DEVCONF_FORCE_TLLAO] = READ_ONCE(cnf->force_tllao);
+ array[DEVCONF_NDISC_NOTIFY] = READ_ONCE(cnf->ndisc_notify);
+ array[DEVCONF_SUPPRESS_FRAG_NDISC] =
+ READ_ONCE(cnf->suppress_frag_ndisc);
+ array[DEVCONF_ACCEPT_RA_FROM_LOCAL] =
+ READ_ONCE(cnf->accept_ra_from_local);
+ array[DEVCONF_ACCEPT_RA_MTU] = READ_ONCE(cnf->accept_ra_mtu);
+ array[DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN] =
+ READ_ONCE(cnf->ignore_routes_with_linkdown);
/* we omit DEVCONF_STABLE_SECRET for now */
- array[DEVCONF_USE_OIF_ADDRS_ONLY] = cnf->use_oif_addrs_only;
- array[DEVCONF_DROP_UNICAST_IN_L2_MULTICAST] = cnf->drop_unicast_in_l2_multicast;
- array[DEVCONF_DROP_UNSOLICITED_NA] = cnf->drop_unsolicited_na;
- array[DEVCONF_KEEP_ADDR_ON_DOWN] = cnf->keep_addr_on_down;
- array[DEVCONF_SEG6_ENABLED] = cnf->seg6_enabled;
+ array[DEVCONF_USE_OIF_ADDRS_ONLY] = READ_ONCE(cnf->use_oif_addrs_only);
+ array[DEVCONF_DROP_UNICAST_IN_L2_MULTICAST] =
+ READ_ONCE(cnf->drop_unicast_in_l2_multicast);
+ array[DEVCONF_DROP_UNSOLICITED_NA] = READ_ONCE(cnf->drop_unsolicited_na);
+ array[DEVCONF_KEEP_ADDR_ON_DOWN] = READ_ONCE(cnf->keep_addr_on_down);
+ array[DEVCONF_SEG6_ENABLED] = READ_ONCE(cnf->seg6_enabled);
#ifdef CONFIG_IPV6_SEG6_HMAC
- array[DEVCONF_SEG6_REQUIRE_HMAC] = cnf->seg6_require_hmac;
+ array[DEVCONF_SEG6_REQUIRE_HMAC] = READ_ONCE(cnf->seg6_require_hmac);
#endif
- array[DEVCONF_ENHANCED_DAD] = cnf->enhanced_dad;
- array[DEVCONF_ADDR_GEN_MODE] = cnf->addr_gen_mode;
- array[DEVCONF_DISABLE_POLICY] = cnf->disable_policy;
- array[DEVCONF_NDISC_TCLASS] = cnf->ndisc_tclass;
- array[DEVCONF_RPL_SEG_ENABLED] = cnf->rpl_seg_enabled;
- array[DEVCONF_IOAM6_ENABLED] = cnf->ioam6_enabled;
- array[DEVCONF_IOAM6_ID] = cnf->ioam6_id;
- array[DEVCONF_IOAM6_ID_WIDE] = cnf->ioam6_id_wide;
- array[DEVCONF_NDISC_EVICT_NOCARRIER] = cnf->ndisc_evict_nocarrier;
- array[DEVCONF_ACCEPT_UNTRACKED_NA] = cnf->accept_untracked_na;
- array[DEVCONF_ACCEPT_RA_MIN_LFT] = cnf->accept_ra_min_lft;
+ array[DEVCONF_ENHANCED_DAD] = READ_ONCE(cnf->enhanced_dad);
+ array[DEVCONF_ADDR_GEN_MODE] = READ_ONCE(cnf->addr_gen_mode);
+ array[DEVCONF_DISABLE_POLICY] = READ_ONCE(cnf->disable_policy);
+ array[DEVCONF_NDISC_TCLASS] = READ_ONCE(cnf->ndisc_tclass);
+ array[DEVCONF_RPL_SEG_ENABLED] = READ_ONCE(cnf->rpl_seg_enabled);
+ array[DEVCONF_IOAM6_ENABLED] = READ_ONCE(cnf->ioam6_enabled);
+ array[DEVCONF_IOAM6_ID] = READ_ONCE(cnf->ioam6_id);
+ array[DEVCONF_IOAM6_ID_WIDE] = READ_ONCE(cnf->ioam6_id_wide);
+ array[DEVCONF_NDISC_EVICT_NOCARRIER] =
+ READ_ONCE(cnf->ndisc_evict_nocarrier);
+ array[DEVCONF_ACCEPT_UNTRACKED_NA] =
+ READ_ONCE(cnf->accept_untracked_na);
+ array[DEVCONF_ACCEPT_RA_MIN_LFT] = READ_ONCE(cnf->accept_ra_min_lft);
}
static inline size_t inet6_ifla6_size(void)
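Every field copy in ipv6_store_devconf() gains a READ_ONCE() because the dump path may now run without RTNL; the matching stores elsewhere in the file become WRITE_ONCE(). A sketch of the pairing rule, with hypothetical demo_* helpers:

static int demo_get_hop_limit(const struct ipv6_devconf *cnf)
{
	/* lockless reader, e.g. data path or an unlocked dump */
	return READ_ONCE(cnf->hop_limit);
}

static void demo_set_hop_limit(struct ipv6_devconf *cnf, int val)
{
	/* writer, still serialized by RTNL or the sysctl handler */
	WRITE_ONCE(cnf->hop_limit, val);
}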
@@ -5738,13 +5787,14 @@ static void snmp6_fill_stats(u64 *stats, struct inet6_dev *idev, int attrtype,
static int inet6_fill_ifla6_attrs(struct sk_buff *skb, struct inet6_dev *idev,
u32 ext_filter_mask)
{
- struct nlattr *nla;
struct ifla_cacheinfo ci;
+ struct nlattr *nla;
+ u32 ra_mtu;
- if (nla_put_u32(skb, IFLA_INET6_FLAGS, idev->if_flags))
+ if (nla_put_u32(skb, IFLA_INET6_FLAGS, READ_ONCE(idev->if_flags)))
goto nla_put_failure;
ci.max_reasm_len = IPV6_MAXPLEN;
- ci.tstamp = cstamp_delta(idev->tstamp);
+ ci.tstamp = cstamp_delta(READ_ONCE(idev->tstamp));
ci.reachable_time = jiffies_to_msecs(idev->nd_parms->reachable_time);
ci.retrans_time = jiffies_to_msecs(NEIGH_VAR(idev->nd_parms, RETRANS_TIME));
if (nla_put(skb, IFLA_INET6_CACHEINFO, sizeof(ci), &ci))
@@ -5776,11 +5826,12 @@ static int inet6_fill_ifla6_attrs(struct sk_buff *skb, struct inet6_dev *idev,
memcpy(nla_data(nla), idev->token.s6_addr, nla_len(nla));
read_unlock_bh(&idev->lock);
- if (nla_put_u8(skb, IFLA_INET6_ADDR_GEN_MODE, idev->cnf.addr_gen_mode))
+ if (nla_put_u8(skb, IFLA_INET6_ADDR_GEN_MODE,
+ READ_ONCE(idev->cnf.addr_gen_mode)))
goto nla_put_failure;
- if (idev->ra_mtu &&
- nla_put_u32(skb, IFLA_INET6_RA_MTU, idev->ra_mtu))
+ ra_mtu = READ_ONCE(idev->ra_mtu);
+ if (ra_mtu && nla_put_u32(skb, IFLA_INET6_RA_MTU, ra_mtu))
goto nla_put_failure;
return 0;
@@ -5842,7 +5893,7 @@ static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token,
return -EINVAL;
}
- if (idev->cnf.rtr_solicits == 0) {
+ if (READ_ONCE(idev->cnf.rtr_solicits) == 0) {
NL_SET_ERR_MSG(extack,
"Router solicitation is disabled on device");
return -EINVAL;
@@ -5875,7 +5926,7 @@ update_lft:
if (update_rs) {
idev->if_flags |= IF_RS_SENT;
idev->rs_interval = rfc3315_s14_backoff_init(
- idev->cnf.rtr_solicit_interval);
+ READ_ONCE(idev->cnf.rtr_solicit_interval));
idev->rs_probes = 1;
addrconf_mod_rs_timer(idev, idev->rs_interval);
}
@@ -5981,7 +6032,7 @@ static int inet6_set_link_af(struct net_device *dev, const struct nlattr *nla,
if (tb[IFLA_INET6_ADDR_GEN_MODE]) {
u8 mode = nla_get_u8(tb[IFLA_INET6_ADDR_GEN_MODE]);
- idev->cnf.addr_gen_mode = mode;
+ WRITE_ONCE(idev->cnf.addr_gen_mode, mode);
}
return 0;
@@ -5993,6 +6044,7 @@ static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev,
struct net_device *dev = idev->dev;
struct ifinfomsg *hdr;
struct nlmsghdr *nlh;
+ int ifindex, iflink;
void *protoinfo;
nlh = nlmsg_put(skb, portid, seq, event, sizeof(*hdr), flags);
@@ -6003,18 +6055,20 @@ static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev,
hdr->ifi_family = AF_INET6;
hdr->__ifi_pad = 0;
hdr->ifi_type = dev->type;
- hdr->ifi_index = dev->ifindex;
+ ifindex = READ_ONCE(dev->ifindex);
+ hdr->ifi_index = ifindex;
hdr->ifi_flags = dev_get_flags(dev);
hdr->ifi_change = 0;
+ iflink = dev_get_iflink(dev);
if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
(dev->addr_len &&
nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
- nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
- (dev->ifindex != dev_get_iflink(dev) &&
- nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))) ||
+ nla_put_u32(skb, IFLA_MTU, READ_ONCE(dev->mtu)) ||
+ (ifindex != iflink &&
+ nla_put_u32(skb, IFLA_LINK, iflink)) ||
nla_put_u8(skb, IFLA_OPERSTATE,
- netif_running(dev) ? dev->operstate : IF_OPER_DOWN))
+ netif_running(dev) ? READ_ONCE(dev->operstate) : IF_OPER_DOWN))
goto nla_put_failure;
protoinfo = nla_nest_start_noflag(skb, IFLA_PROTINFO);
if (!protoinfo)
@@ -6060,50 +6114,39 @@ static int inet6_valid_dump_ifinfo(const struct nlmsghdr *nlh,
static int inet6_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
{
struct net *net = sock_net(skb->sk);
- int h, s_h;
- int idx = 0, s_idx;
+ struct {
+ unsigned long ifindex;
+ } *ctx = (void *)cb->ctx;
struct net_device *dev;
struct inet6_dev *idev;
- struct hlist_head *head;
+ int err;
/* only requests using strict checking can pass data to
* influence the dump
*/
if (cb->strict_check) {
- int err = inet6_valid_dump_ifinfo(cb->nlh, cb->extack);
+ err = inet6_valid_dump_ifinfo(cb->nlh, cb->extack);
if (err < 0)
return err;
}
- s_h = cb->args[0];
- s_idx = cb->args[1];
-
+ err = 0;
rcu_read_lock();
- for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
- idx = 0;
- head = &net->dev_index_head[h];
- hlist_for_each_entry_rcu(dev, head, index_hlist) {
- if (idx < s_idx)
- goto cont;
- idev = __in6_dev_get(dev);
- if (!idev)
- goto cont;
- if (inet6_fill_ifinfo(skb, idev,
- NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq,
- RTM_NEWLINK, NLM_F_MULTI) < 0)
- goto out;
-cont:
- idx++;
- }
+ for_each_netdev_dump(net, dev, ctx->ifindex) {
+ idev = __in6_dev_get(dev);
+ if (!idev)
+ continue;
+ err = inet6_fill_ifinfo(skb, idev,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+ RTM_NEWLINK, NLM_F_MULTI);
+ if (err < 0)
+ break;
}
-out:
rcu_read_unlock();
- cb->args[1] = idx;
- cb->args[0] = h;
- return skb->len;
+ return err;
}
void inet6_ifinfo_notify(int event, struct inet6_dev *idev)
@@ -6324,7 +6367,8 @@ static void addrconf_disable_change(struct net *net, __s32 newf)
idev = __in6_dev_get(dev);
if (idev) {
int changed = (!idev->cnf.disable_ipv6) ^ (!newf);
- idev->cnf.disable_ipv6 = newf;
+
+ WRITE_ONCE(idev->cnf.disable_ipv6, newf);
if (changed)
dev_disable_change(idev);
}
@@ -6333,23 +6377,22 @@ static void addrconf_disable_change(struct net *net, __s32 newf)
static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int newf)
{
- struct net *net;
+ struct net *net = (struct net *)table->extra2;
int old;
+ if (p == &net->ipv6.devconf_dflt->disable_ipv6) {
+ WRITE_ONCE(*p, newf);
+ return 0;
+ }
+
if (!rtnl_trylock())
return restart_syscall();
- net = (struct net *)table->extra2;
old = *p;
- *p = newf;
-
- if (p == &net->ipv6.devconf_dflt->disable_ipv6) {
- rtnl_unlock();
- return 0;
- }
+ WRITE_ONCE(*p, newf);
if (p == &net->ipv6.devconf_all->disable_ipv6) {
- net->ipv6.devconf_dflt->disable_ipv6 = newf;
+ WRITE_ONCE(net->ipv6.devconf_dflt->disable_ipv6, newf);
addrconf_disable_change(net, newf);
} else if ((!newf) ^ (!old))
dev_disable_change((struct inet6_dev *)table->extra1);
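The reordering above gives addrconf_disable_ipv6() a lock-free fast path for the per-netns default before falling back to RTNL. A hypothetical handler showing the overall trylock/restart shape:

static int demo_sysctl_store(int *p, int newf, bool needs_rtnl)
{
	if (!needs_rtnl) {
		/* default-only values need no propagation */
		WRITE_ONCE(*p, newf);
		return 0;
	}

	/* trylock plus syscall restart avoids deadlocking against
	 * sysctl unregistration, which runs under RTNL
	 */
	if (!rtnl_trylock())
		return restart_syscall();

	WRITE_ONCE(*p, newf);
	/* ... propagate the new value to each device ... */
	rtnl_unlock();
	return 0;
}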
@@ -6460,24 +6503,25 @@ static int addrconf_sysctl_addr_gen_mode(struct ctl_table *ctl, int write,
}
if (idev->cnf.addr_gen_mode != new_val) {
- idev->cnf.addr_gen_mode = new_val;
+ WRITE_ONCE(idev->cnf.addr_gen_mode, new_val);
addrconf_init_auto_addrs(idev->dev);
}
} else if (&net->ipv6.devconf_all->addr_gen_mode == ctl->data) {
struct net_device *dev;
- net->ipv6.devconf_dflt->addr_gen_mode = new_val;
+ WRITE_ONCE(net->ipv6.devconf_dflt->addr_gen_mode, new_val);
for_each_netdev(net, dev) {
idev = __in6_dev_get(dev);
if (idev &&
idev->cnf.addr_gen_mode != new_val) {
- idev->cnf.addr_gen_mode = new_val;
+ WRITE_ONCE(idev->cnf.addr_gen_mode,
+ new_val);
addrconf_init_auto_addrs(idev->dev);
}
}
}
- *((u32 *)ctl->data) = new_val;
+ WRITE_ONCE(*((u32 *)ctl->data), new_val);
}
out:
@@ -6536,14 +6580,15 @@ static int addrconf_sysctl_stable_secret(struct ctl_table *ctl, int write,
struct inet6_dev *idev = __in6_dev_get(dev);
if (idev) {
- idev->cnf.addr_gen_mode =
- IN6_ADDR_GEN_MODE_STABLE_PRIVACY;
+ WRITE_ONCE(idev->cnf.addr_gen_mode,
+ IN6_ADDR_GEN_MODE_STABLE_PRIVACY);
}
}
} else {
struct inet6_dev *idev = ctl->extra1;
- idev->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_STABLE_PRIVACY;
+ WRITE_ONCE(idev->cnf.addr_gen_mode,
+ IN6_ADDR_GEN_MODE_STABLE_PRIVACY);
}
out:
@@ -6623,20 +6668,19 @@ void addrconf_disable_policy_idev(struct inet6_dev *idev, int val)
static
int addrconf_disable_policy(struct ctl_table *ctl, int *valp, int val)
{
+ struct net *net = (struct net *)ctl->extra2;
struct inet6_dev *idev;
- struct net *net;
- if (!rtnl_trylock())
- return restart_syscall();
-
- *valp = val;
-
- net = (struct net *)ctl->extra2;
if (valp == &net->ipv6.devconf_dflt->disable_policy) {
- rtnl_unlock();
+ WRITE_ONCE(*valp, val);
return 0;
}
+ if (!rtnl_trylock())
+ return restart_syscall();
+
+ WRITE_ONCE(*valp, val);
+
if (valp == &net->ipv6.devconf_all->disable_policy) {
struct net_device *dev;
@@ -6806,6 +6850,13 @@ static const struct ctl_table addrconf_sysctl[] = {
.proc_handler = proc_dointvec,
},
{
+ .procname = "regen_min_advance",
+ .data = &ipv6_devconf.regen_min_advance,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
.procname = "regen_max_retry",
.data = &ipv6_devconf.regen_max_retry,
.maxlen = sizeof(int),
@@ -7365,7 +7416,8 @@ int __init addrconf_init(void)
if (err < 0)
goto out_addrlabel;
- addrconf_wq = create_workqueue("ipv6_addrconf");
+ /* All work items queued on addrconf_wq need to take the rtnl lock. */
+ addrconf_wq = create_singlethread_workqueue("ipv6_addrconf");
if (!addrconf_wq) {
err = -ENOMEM;
goto out_nowq;
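Since every item queued on addrconf_wq takes RTNL, a multi-threaded workqueue only added idle kthreads; the ordered single-thread queue states the serialization explicitly. Illustrative usage, with demo_* names as placeholders:

static void demo_fn(struct work_struct *work)
{
	rtnl_lock();
	/* ... mutate addrconf state ... */
	rtnl_unlock();
}

static DECLARE_WORK(demo_work, demo_fn);
static struct workqueue_struct *demo_wq;

static int __init demo_wq_init(void)
{
	demo_wq = create_singlethread_workqueue("demo_rtnl_wq");
	if (!demo_wq)
		return -ENOMEM;
	queue_work(demo_wq, &demo_work);	/* items run one at a time */
	return 0;
}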
@@ -7388,7 +7440,7 @@ int __init addrconf_init(void)
rtnl_af_register(&inet6_ops);
err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETLINK,
- NULL, inet6_dump_ifinfo, 0);
+ NULL, inet6_dump_ifinfo, RTNL_FLAG_DUMP_UNLOCKED);
if (err < 0)
goto errout;
@@ -7402,21 +7454,25 @@ int __init addrconf_init(void)
goto errout;
err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETADDR,
inet6_rtm_getaddr, inet6_dump_ifaddr,
- RTNL_FLAG_DOIT_UNLOCKED);
+ RTNL_FLAG_DOIT_UNLOCKED |
+ RTNL_FLAG_DUMP_UNLOCKED);
if (err < 0)
goto errout;
err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETMULTICAST,
- NULL, inet6_dump_ifmcaddr, 0);
+ NULL, inet6_dump_ifmcaddr,
+ RTNL_FLAG_DUMP_UNLOCKED);
if (err < 0)
goto errout;
err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETANYCAST,
- NULL, inet6_dump_ifacaddr, 0);
+ NULL, inet6_dump_ifacaddr,
+ RTNL_FLAG_DUMP_UNLOCKED);
if (err < 0)
goto errout;
err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETNETCONF,
inet6_netconf_get_devconf,
inet6_netconf_dump_devconf,
- RTNL_FLAG_DOIT_UNLOCKED);
+ RTNL_FLAG_DOIT_UNLOCKED |
+ RTNL_FLAG_DUMP_UNLOCKED);
if (err < 0)
goto errout;
err = ipv6_addr_label_rtnl_register();
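RTNL_FLAG_DUMP_UNLOCKED is what lets the conversions earlier in this file pay off: the rtnetlink core stops taking RTNL around ->dumpit(), so the handler must be RCU-safe on its own. A sketch of such a registration, reusing the hypothetical demo_dump() from the earlier note:

static int __init demo_rtnl_init(void)
{
	/* NULL doit: this message type is dump-only here */
	return rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETLINK,
				    NULL, demo_dump,
				    RTNL_FLAG_DUMP_UNLOCKED);
}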
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 959bfd9f6344..8041dc181bd4 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -64,6 +64,7 @@
#include <net/xfrm.h>
#include <net/ioam6.h>
#include <net/rawv6.h>
+#include <net/rps.h>
#include <linux/uaccess.h>
#include <linux/mroute6.h>
@@ -736,7 +737,7 @@ const struct proto_ops inet6_dgram_ops = {
.recvmsg = inet6_recvmsg, /* retpoline's sake */
.read_skb = udp_read_skb,
.mmap = sock_no_mmap,
- .set_peek_off = sk_set_peek_off,
+ .set_peek_off = udp_set_peek_off,
#ifdef CONFIG_COMPAT
.compat_ioctl = inet6_compat_ioctl,
#endif
diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c
index bb17f484ee2c..0f2506e35359 100644
--- a/net/ipv6/anycast.c
+++ b/net/ipv6/anycast.c
@@ -296,7 +296,8 @@ int __ipv6_dev_ac_inc(struct inet6_dev *idev, const struct in6_addr *addr)
goto out;
}
- for (aca = idev->ac_list; aca; aca = aca->aca_next) {
+ for (aca = rtnl_dereference(idev->ac_list); aca;
+ aca = rtnl_dereference(aca->aca_next)) {
if (ipv6_addr_equal(&aca->aca_addr, addr)) {
aca->aca_users++;
err = 0;
@@ -317,13 +318,13 @@ int __ipv6_dev_ac_inc(struct inet6_dev *idev, const struct in6_addr *addr)
goto out;
}
- aca->aca_next = idev->ac_list;
- idev->ac_list = aca;
-
/* Hold this for addrconf_join_solict() below before we unlock,
* it is already exposed via idev->ac_list.
*/
aca_get(aca);
+ aca->aca_next = idev->ac_list;
+ rcu_assign_pointer(idev->ac_list, aca);
+
write_unlock_bh(&idev->lock);
ipv6_add_acaddr_hash(net, aca);
@@ -350,7 +351,8 @@ int __ipv6_dev_ac_dec(struct inet6_dev *idev, const struct in6_addr *addr)
write_lock_bh(&idev->lock);
prev_aca = NULL;
- for (aca = idev->ac_list; aca; aca = aca->aca_next) {
+ for (aca = rtnl_dereference(idev->ac_list); aca;
+ aca = rtnl_dereference(aca->aca_next)) {
if (ipv6_addr_equal(&aca->aca_addr, addr))
break;
prev_aca = aca;
@@ -364,9 +366,9 @@ int __ipv6_dev_ac_dec(struct inet6_dev *idev, const struct in6_addr *addr)
return 0;
}
if (prev_aca)
- prev_aca->aca_next = aca->aca_next;
+ rcu_assign_pointer(prev_aca->aca_next, aca->aca_next);
else
- idev->ac_list = aca->aca_next;
+ rcu_assign_pointer(idev->ac_list, aca->aca_next);
write_unlock_bh(&idev->lock);
ipv6_del_acaddr_hash(aca);
addrconf_leave_solict(idev, &aca->aca_addr);
@@ -392,8 +394,8 @@ void ipv6_ac_destroy_dev(struct inet6_dev *idev)
struct ifacaddr6 *aca;
write_lock_bh(&idev->lock);
- while ((aca = idev->ac_list) != NULL) {
- idev->ac_list = aca->aca_next;
+ while ((aca = rtnl_dereference(idev->ac_list)) != NULL) {
+ rcu_assign_pointer(idev->ac_list, aca->aca_next);
write_unlock_bh(&idev->lock);
ipv6_del_acaddr_hash(aca);
@@ -420,11 +422,10 @@ static bool ipv6_chk_acast_dev(struct net_device *dev, const struct in6_addr *ad
idev = __in6_dev_get(dev);
if (idev) {
- read_lock_bh(&idev->lock);
- for (aca = idev->ac_list; aca; aca = aca->aca_next)
+ for (aca = rcu_dereference(idev->ac_list); aca;
+ aca = rcu_dereference(aca->aca_next))
if (ipv6_addr_equal(&aca->aca_addr, addr))
break;
- read_unlock_bh(&idev->lock);
return aca != NULL;
}
return false;
@@ -477,30 +478,25 @@ bool ipv6_chk_acast_addr_src(struct net *net, struct net_device *dev,
struct ac6_iter_state {
struct seq_net_private p;
struct net_device *dev;
- struct inet6_dev *idev;
};
#define ac6_seq_private(seq) ((struct ac6_iter_state *)(seq)->private)
static inline struct ifacaddr6 *ac6_get_first(struct seq_file *seq)
{
- struct ifacaddr6 *im = NULL;
struct ac6_iter_state *state = ac6_seq_private(seq);
struct net *net = seq_file_net(seq);
+ struct ifacaddr6 *im = NULL;
- state->idev = NULL;
for_each_netdev_rcu(net, state->dev) {
struct inet6_dev *idev;
+
idev = __in6_dev_get(state->dev);
if (!idev)
continue;
- read_lock_bh(&idev->lock);
- im = idev->ac_list;
- if (im) {
- state->idev = idev;
+ im = rcu_dereference(idev->ac_list);
+ if (im)
break;
- }
- read_unlock_bh(&idev->lock);
}
return im;
}
@@ -508,22 +504,17 @@ static inline struct ifacaddr6 *ac6_get_first(struct seq_file *seq)
static struct ifacaddr6 *ac6_get_next(struct seq_file *seq, struct ifacaddr6 *im)
{
struct ac6_iter_state *state = ac6_seq_private(seq);
+ struct inet6_dev *idev;
- im = im->aca_next;
+ im = rcu_dereference(im->aca_next);
while (!im) {
- if (likely(state->idev != NULL))
- read_unlock_bh(&state->idev->lock);
-
state->dev = next_net_device_rcu(state->dev);
- if (!state->dev) {
- state->idev = NULL;
+ if (!state->dev)
break;
- }
- state->idev = __in6_dev_get(state->dev);
- if (!state->idev)
+ idev = __in6_dev_get(state->dev);
+ if (!idev)
continue;
- read_lock_bh(&state->idev->lock);
- im = state->idev->ac_list;
+ im = rcu_dereference(idev->ac_list);
}
return im;
}
@@ -555,12 +546,6 @@ static void *ac6_seq_next(struct seq_file *seq, void *v, loff_t *pos)
static void ac6_seq_stop(struct seq_file *seq, void *v)
__releases(RCU)
{
- struct ac6_iter_state *state = ac6_seq_private(seq);
-
- if (likely(state->idev != NULL)) {
- read_unlock_bh(&state->idev->lock);
- state->idev = NULL;
- }
rcu_read_unlock();
}
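A simplified sketch of the RCU conversion applied to idev->ac_list above: writers stay serialized by idev->lock and publish with rcu_assign_pointer(), while readers walk the chain under rcu_read_lock() alone. struct demo_node is a stand-in for struct ifacaddr6:

struct demo_node {
	struct demo_node __rcu	*next;
	struct in6_addr		addr;
};

static struct demo_node __rcu *demo_list;

static void demo_add(struct demo_node *n)	/* write lock held */
{
	RCU_INIT_POINTER(n->next, rcu_dereference_protected(demo_list, 1));
	rcu_assign_pointer(demo_list, n);	/* publish last */
}

static bool demo_find(const struct in6_addr *addr)	/* under RCU */
{
	struct demo_node *n;

	for (n = rcu_dereference(demo_list); n;
	     n = rcu_dereference(n->next))
		if (ipv6_addr_equal(&n->addr, addr))
			return true;
	return false;
}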
diff --git a/net/ipv6/calipso.c b/net/ipv6/calipso.c
index 1578ed9e97d8..eb8ee1e9373a 100644
--- a/net/ipv6/calipso.c
+++ b/net/ipv6/calipso.c
@@ -657,11 +657,8 @@ static int calipso_map_cat_ntoh(const struct calipso_doi *doi_def,
net_clen_bits,
spot + 1,
1);
- if (spot < 0) {
- if (spot == -2)
- return -EFAULT;
+ if (spot < 0)
return 0;
- }
ret_val = netlbl_catmap_setbit(&secattr->attr.mls.cat,
spot,
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index 02e9ffb63af1..6789623b2b0d 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -50,6 +50,7 @@
#endif
#include <net/rpl.h>
#include <linux/ioam6.h>
+#include <linux/ioam6_genl.h>
#include <net/ioam6.h>
#include <net/dst_metadata.h>
@@ -378,9 +379,8 @@ static int ipv6_srh_rcv(struct sk_buff *skb)
idev = __in6_dev_get(skb->dev);
- accept_seg6 = net->ipv6.devconf_all->seg6_enabled;
- if (accept_seg6 > idev->cnf.seg6_enabled)
- accept_seg6 = idev->cnf.seg6_enabled;
+ accept_seg6 = min(READ_ONCE(net->ipv6.devconf_all->seg6_enabled),
+ READ_ONCE(idev->cnf.seg6_enabled));
if (!accept_seg6) {
kfree_skb(skb);
@@ -654,10 +654,13 @@ static int ipv6_rthdr_rcv(struct sk_buff *skb)
struct ipv6_rt_hdr *hdr;
struct rt0_hdr *rthdr;
struct net *net = dev_net(skb->dev);
- int accept_source_route = net->ipv6.devconf_all->accept_source_route;
+ int accept_source_route;
- if (idev && accept_source_route > idev->cnf.accept_source_route)
- accept_source_route = idev->cnf.accept_source_route;
+ accept_source_route = READ_ONCE(net->ipv6.devconf_all->accept_source_route);
+
+ if (idev)
+ accept_source_route = min(accept_source_route,
+ READ_ONCE(idev->cnf.accept_source_route));
if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) ||
!pskb_may_pull(skb, (skb_transport_offset(skb) +
@@ -801,7 +804,7 @@ looped_back:
ip6_route_input(skb);
if (skb_dst(skb)->error) {
- skb_push(skb, skb->data - skb_network_header(skb));
+ skb_push(skb, -skb_network_offset(skb));
dst_input(skb);
return -1;
}
@@ -818,7 +821,7 @@ looped_back:
goto looped_back;
}
- skb_push(skb, skb->data - skb_network_header(skb));
+ skb_push(skb, -skb_network_offset(skb));
dst_input(skb);
return -1;
@@ -880,14 +883,6 @@ void ipv6_exthdrs_exit(void)
Hop-by-hop options.
**********************************/
-/*
- * Note: we cannot rely on skb_dst(skb) before we assign it in ip6_route_input().
- */
-static inline struct net *ipv6_skb_net(struct sk_buff *skb)
-{
- return skb_dst(skb) ? dev_net(skb_dst(skb)->dev) : dev_net(skb->dev);
-}
-
/* Router Alert as of RFC 2711 */
static bool ipv6_hop_ra(struct sk_buff *skb, int optoff)
@@ -918,7 +913,7 @@ static bool ipv6_hop_ioam(struct sk_buff *skb, int optoff)
goto drop;
/* Ignore if IOAM is not enabled on ingress */
- if (!__in6_dev_get(skb->dev)->cnf.ioam6_enabled)
+ if (!READ_ONCE(__in6_dev_get(skb->dev)->cnf.ioam6_enabled))
goto ignore;
/* Truncated Option header */
@@ -938,7 +933,7 @@ static bool ipv6_hop_ioam(struct sk_buff *skb, int optoff)
goto drop;
/* Ignore if the IOAM namespace is unknown */
- ns = ioam6_namespace(ipv6_skb_net(skb), trace->namespace_id);
+ ns = ioam6_namespace(dev_net(skb->dev), trace->namespace_id);
if (!ns)
goto ignore;
@@ -954,6 +949,9 @@ static bool ipv6_hop_ioam(struct sk_buff *skb, int optoff)
+ optoff + sizeof(*hdr));
ioam6_fill_trace_data(skb, ns, trace, true);
+
+ ioam6_event(IOAM6_EVENT_TRACE, dev_net(skb->dev),
+ GFP_ATOMIC, (void *)trace, hdr->opt_len - 2);
break;
default:
break;
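Both sysctl lookups rewritten above encode the same rule: the effective per-packet setting is the more restrictive of conf/all and conf/<interface>, each value read locklessly. As a hypothetical helper:

static int demo_seg6_enabled(const struct net *net,
			     const struct inet6_dev *idev)
{
	int v = READ_ONCE(net->ipv6.devconf_all->seg6_enabled);

	if (idev)
		v = min(v, READ_ONCE(idev->cnf.seg6_enabled));
	return v;
}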
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index 7523c4baef35..52c04f0ac498 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -449,6 +449,11 @@ static size_t fib6_rule_nlmsg_payload(struct fib_rule *rule)
+ nla_total_size(16); /* src */
}
+static void fib6_rule_flush_cache(struct fib_rules_ops *ops)
+{
+ rt_genid_bump_ipv6(ops->fro_net);
+}
+
static const struct fib_rules_ops __net_initconst fib6_rules_ops_template = {
.family = AF_INET6,
.rule_size = sizeof(struct fib6_rule),
@@ -461,6 +466,7 @@ static const struct fib_rules_ops __net_initconst fib6_rules_ops_template = {
.compare = fib6_rule_compare,
.fill = fib6_rule_fill,
.nlmsg_payload = fib6_rule_nlmsg_payload,
+ .flush_cache = fib6_rule_flush_cache,
.nlgroup = RTNLGRP_IPV6_RULE,
.owner = THIS_MODULE,
.fro_net = &init_net,
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index b0e8d278e8a9..2e81383b663b 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -14,6 +14,7 @@
#include <linux/random.h>
#include <net/addrconf.h>
+#include <net/hotdata.h>
#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/inet6_hashtables.h>
@@ -25,16 +26,13 @@ u32 inet6_ehashfn(const struct net *net,
const struct in6_addr *laddr, const u16 lport,
const struct in6_addr *faddr, const __be16 fport)
{
- static u32 inet6_ehash_secret __read_mostly;
- static u32 ipv6_hash_secret __read_mostly;
-
u32 lhash, fhash;
net_get_random_once(&inet6_ehash_secret, sizeof(inet6_ehash_secret));
- net_get_random_once(&ipv6_hash_secret, sizeof(ipv6_hash_secret));
+ net_get_random_once(&tcp_ipv6_hash_secret, sizeof(tcp_ipv6_hash_secret));
lhash = (__force u32)laddr->s6_addr32[3];
- fhash = __ipv6_addr_jhash(faddr, ipv6_hash_secret);
+ fhash = __ipv6_addr_jhash(faddr, tcp_ipv6_hash_secret);
return __inet6_ehashfn(lhash, lport, fhash, fport,
inet6_ehash_secret + net_hash_mix(net));
diff --git a/net/ipv6/ioam6.c b/net/ipv6/ioam6.c
index 571f0e4d9cf3..08c929513065 100644
--- a/net/ipv6/ioam6.c
+++ b/net/ipv6/ioam6.c
@@ -612,6 +612,68 @@ static const struct genl_ops ioam6_genl_ops[] = {
},
};
+#define IOAM6_GENL_EV_GRP_OFFSET 0
+
+static const struct genl_multicast_group ioam6_mcgrps[] = {
+ [IOAM6_GENL_EV_GRP_OFFSET] = { .name = IOAM6_GENL_EV_GRP_NAME,
+ .flags = GENL_MCAST_CAP_NET_ADMIN },
+};
+
+static int ioam6_event_put_trace(struct sk_buff *skb,
+ struct ioam6_trace_hdr *trace,
+ unsigned int len)
+{
+ if (nla_put_u16(skb, IOAM6_EVENT_ATTR_TRACE_NAMESPACE,
+ be16_to_cpu(trace->namespace_id)) ||
+ nla_put_u8(skb, IOAM6_EVENT_ATTR_TRACE_NODELEN, trace->nodelen) ||
+ nla_put_u32(skb, IOAM6_EVENT_ATTR_TRACE_TYPE,
+ be32_to_cpu(trace->type_be32)) ||
+ nla_put(skb, IOAM6_EVENT_ATTR_TRACE_DATA,
+ len - sizeof(struct ioam6_trace_hdr) - trace->remlen * 4,
+ trace->data + trace->remlen * 4))
+ return 1;
+
+ return 0;
+}
+
+void ioam6_event(enum ioam6_event_type type, struct net *net, gfp_t gfp,
+ void *opt, unsigned int opt_len)
+{
+ struct nlmsghdr *nlh;
+ struct sk_buff *skb;
+
+ if (!genl_has_listeners(&ioam6_genl_family, net,
+ IOAM6_GENL_EV_GRP_OFFSET))
+ return;
+
+ skb = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
+ if (!skb)
+ return;
+
+ nlh = genlmsg_put(skb, 0, 0, &ioam6_genl_family, 0, type);
+ if (!nlh)
+ goto nla_put_failure;
+
+ switch (type) {
+ case IOAM6_EVENT_UNSPEC:
+ WARN_ON_ONCE(1);
+ break;
+ case IOAM6_EVENT_TRACE:
+ if (ioam6_event_put_trace(skb, (struct ioam6_trace_hdr *)opt,
+ opt_len))
+ goto nla_put_failure;
+ break;
+ }
+
+ genlmsg_end(skb, nlh);
+ genlmsg_multicast_netns(&ioam6_genl_family, net, skb, 0,
+ IOAM6_GENL_EV_GRP_OFFSET, gfp);
+ return;
+
+nla_put_failure:
+ nlmsg_free(skb);
+}
+
static struct genl_family ioam6_genl_family __ro_after_init = {
.name = IOAM6_GENL_NAME,
.version = IOAM6_GENL_VERSION,
@@ -620,6 +682,8 @@ static struct genl_family ioam6_genl_family __ro_after_init = {
.ops = ioam6_genl_ops,
.n_ops = ARRAY_SIZE(ioam6_genl_ops),
.resv_start_op = IOAM6_CMD_NS_SET_SCHEMA + 1,
+ .mcgrps = ioam6_mcgrps,
+ .n_mcgrps = ARRAY_SIZE(ioam6_mcgrps),
.module = THIS_MODULE,
};
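ioam6_event() follows the standard genetlink notification recipe: bail out cheaply when the multicast group has no listeners, build the message, then multicast it into the originating namespace. A condensed sketch with demo names and the attributes elided:

static void demo_notify(const struct genl_family *fam, struct net *net,
			unsigned int grp, u8 cmd)
{
	struct sk_buff *skb;
	void *hdr;

	if (!genl_has_listeners(fam, net, grp))
		return;		/* don't allocate for nobody */

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
	if (!skb)
		return;

	hdr = genlmsg_put(skb, 0, 0, fam, 0, cmd);
	if (!hdr) {
		nlmsg_free(skb);
		return;
	}
	/* ... nla_put() the event attributes here ... */
	genlmsg_end(skb, hdr);
	genlmsg_multicast_netns(fam, net, skb, 0, grp, GFP_ATOMIC);
}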
@@ -663,7 +727,7 @@ static void __ioam6_fill_trace_data(struct sk_buff *skb,
if (!skb->dev)
raw16 = IOAM6_U16_UNAVAILABLE;
else
- raw16 = (__force u16)__in6_dev_get(skb->dev)->cnf.ioam6_id;
+ raw16 = (__force u16)READ_ONCE(__in6_dev_get(skb->dev)->cnf.ioam6_id);
*(__be16 *)data = cpu_to_be16(raw16);
data += sizeof(__be16);
@@ -671,7 +735,7 @@ static void __ioam6_fill_trace_data(struct sk_buff *skb,
if (skb_dst(skb)->dev->flags & IFF_LOOPBACK)
raw16 = IOAM6_U16_UNAVAILABLE;
else
- raw16 = (__force u16)__in6_dev_get(skb_dst(skb)->dev)->cnf.ioam6_id;
+ raw16 = (__force u16)READ_ONCE(__in6_dev_get(skb_dst(skb)->dev)->cnf.ioam6_id);
*(__be16 *)data = cpu_to_be16(raw16);
data += sizeof(__be16);
@@ -758,7 +822,7 @@ static void __ioam6_fill_trace_data(struct sk_buff *skb,
if (!skb->dev)
raw32 = IOAM6_U32_UNAVAILABLE;
else
- raw32 = __in6_dev_get(skb->dev)->cnf.ioam6_id_wide;
+ raw32 = READ_ONCE(__in6_dev_get(skb->dev)->cnf.ioam6_id_wide);
*(__be32 *)data = cpu_to_be32(raw32);
data += sizeof(__be32);
@@ -766,7 +830,7 @@ static void __ioam6_fill_trace_data(struct sk_buff *skb,
if (skb_dst(skb)->dev->flags & IFF_LOOPBACK)
raw32 = IOAM6_U32_UNAVAILABLE;
else
- raw32 = __in6_dev_get(skb_dst(skb)->dev)->cnf.ioam6_id_wide;
+ raw32 = READ_ONCE(__in6_dev_get(skb_dst(skb)->dev)->cnf.ioam6_id_wide);
*(__be32 *)data = cpu_to_be32(raw32);
data += sizeof(__be32);
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 4fc2cae0d116..5c558dc1c683 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -160,6 +160,8 @@ struct fib6_info *fib6_info_alloc(gfp_t gfp_flags, bool with_fib6_nh)
INIT_LIST_HEAD(&f6i->fib6_siblings);
refcount_set(&f6i->fib6_ref, 1);
+ INIT_HLIST_NODE(&f6i->gc_link);
+
return f6i;
}
@@ -246,6 +248,7 @@ static struct fib6_table *fib6_alloc_table(struct net *net, u32 id)
net->ipv6.fib6_null_entry);
table->tb6_root.fn_flags = RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO;
inet_peer_base_init(&table->tb6_peers);
+ INIT_HLIST_HEAD(&table->tb6_gc_hlist);
}
return table;
@@ -617,8 +620,11 @@ static int fib6_dump_table(struct fib6_table *table, struct sk_buff *skb,
static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
{
- struct rt6_rtnl_dump_arg arg = { .filter.dump_exceptions = true,
- .filter.dump_routes = true };
+ struct rt6_rtnl_dump_arg arg = {
+ .filter.dump_exceptions = true,
+ .filter.dump_routes = true,
+ .filter.rtnl_held = true,
+ };
const struct nlmsghdr *nlh = cb->nlh;
struct net *net = sock_net(skb->sk);
unsigned int h, s_h;
@@ -751,8 +757,6 @@ static struct fib6_node *fib6_add_1(struct net *net,
int bit;
__be32 dir = 0;
- RT6_TRACE("fib6_add_1\n");
-
/* insert node in tree */
fn = root;
@@ -1057,6 +1061,9 @@ static void fib6_purge_rt(struct fib6_info *rt, struct fib6_node *fn,
lockdep_is_held(&table->tb6_lock));
}
}
+
+ fib6_clean_expires(rt);
+ fib6_remove_gc_list(rt);
}
/*
@@ -1117,10 +1124,13 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt,
rt->fib6_nsiblings = 0;
if (!(iter->fib6_flags & RTF_EXPIRES))
return -EEXIST;
- if (!(rt->fib6_flags & RTF_EXPIRES))
+ if (!(rt->fib6_flags & RTF_EXPIRES)) {
fib6_clean_expires(iter);
- else
+ fib6_remove_gc_list(iter);
+ } else {
fib6_set_expires(iter, rt->expires);
+ fib6_add_gc_list(iter);
+ }
if (rt->fib6_pmtu)
fib6_metric_set(iter, RTAX_MTU,
@@ -1479,6 +1489,10 @@ int fib6_add(struct fib6_node *root, struct fib6_info *rt,
if (rt->nh)
list_add(&rt->nh_list, &rt->nh->f6i_list);
__fib6_update_sernum_upto_root(rt, fib6_new_sernum(info->nl_net));
+
+ if (rt->fib6_flags & RTF_EXPIRES)
+ fib6_add_gc_list(rt);
+
fib6_start_gc(info->nl_net, rt);
}
@@ -1803,7 +1817,7 @@ static struct fib6_node *fib6_repair_tree(struct net *net,
lockdep_is_held(&table->tb6_lock));
struct fib6_info *new_fn_leaf;
- RT6_TRACE("fixing tree: plen=%d iter=%d\n", fn->fn_bit, iter);
+ pr_debug("fixing tree: plen=%d iter=%d\n", fn->fn_bit, iter);
iter++;
WARN_ON(fn->fn_flags & RTN_RTINFO);
@@ -1866,7 +1880,8 @@ static struct fib6_node *fib6_repair_tree(struct net *net,
FOR_WALKERS(net, w) {
if (!child) {
if (w->node == fn) {
- RT6_TRACE("W %p adjusted by delnode 1, s=%d/%d\n", w, w->state, nstate);
+ pr_debug("W %p adjusted by delnode 1, s=%d/%d\n",
+ w, w->state, nstate);
w->node = pn;
w->state = nstate;
}
@@ -1874,10 +1889,12 @@ static struct fib6_node *fib6_repair_tree(struct net *net,
if (w->node == fn) {
w->node = child;
if (children&2) {
- RT6_TRACE("W %p adjusted by delnode 2, s=%d\n", w, w->state);
+ pr_debug("W %p adjusted by delnode 2, s=%d\n",
+ w, w->state);
w->state = w->state >= FWS_R ? FWS_U : FWS_INIT;
} else {
- RT6_TRACE("W %p adjusted by delnode 2, s=%d\n", w, w->state);
+ pr_debug("W %p adjusted by delnode 2, s=%d\n",
+ w, w->state);
w->state = w->state >= FWS_C ? FWS_U : FWS_INIT;
}
}
@@ -1905,8 +1922,6 @@ static void fib6_del_route(struct fib6_table *table, struct fib6_node *fn,
struct net *net = info->nl_net;
bool notify_del = false;
- RT6_TRACE("fib6_del_route\n");
-
/* If the deleted route is the first in the node and it is not part of
* a multipath route, then we need to replace it with the next route
* in the node, if exists.
@@ -1955,7 +1970,7 @@ static void fib6_del_route(struct fib6_table *table, struct fib6_node *fn,
read_lock(&net->ipv6.fib6_walker_lock);
FOR_WALKERS(net, w) {
if (w->state == FWS_C && w->leaf == rt) {
- RT6_TRACE("walker %p adjusted by delroute\n", w);
+ pr_debug("walker %p adjusted by delroute\n", w);
w->leaf = rcu_dereference_protected(rt->fib6_next,
lockdep_is_held(&table->tb6_lock));
if (!w->leaf)
@@ -2281,9 +2296,8 @@ static void fib6_flush_trees(struct net *net)
* Garbage collection
*/
-static int fib6_age(struct fib6_info *rt, void *arg)
+static int fib6_age(struct fib6_info *rt, struct fib6_gc_args *gc_args)
{
- struct fib6_gc_args *gc_args = arg;
unsigned long now = jiffies;
/*
@@ -2293,7 +2307,7 @@ static int fib6_age(struct fib6_info *rt, void *arg)
if (rt->fib6_flags & RTF_EXPIRES && rt->expires) {
if (time_after(now, rt->expires)) {
- RT6_TRACE("expiring %p\n", rt);
+ pr_debug("expiring %p\n", rt);
return -1;
}
gc_args->more++;
@@ -2308,6 +2322,42 @@ static int fib6_age(struct fib6_info *rt, void *arg)
return 0;
}
+static void fib6_gc_table(struct net *net,
+ struct fib6_table *tb6,
+ struct fib6_gc_args *gc_args)
+{
+ struct fib6_info *rt;
+ struct hlist_node *n;
+ struct nl_info info = {
+ .nl_net = net,
+ .skip_notify = false,
+ };
+
+ hlist_for_each_entry_safe(rt, n, &tb6->tb6_gc_hlist, gc_link)
+ if (fib6_age(rt, gc_args) == -1)
+ fib6_del(rt, &info);
+}
+
+static void fib6_gc_all(struct net *net, struct fib6_gc_args *gc_args)
+{
+ struct fib6_table *table;
+ struct hlist_head *head;
+ unsigned int h;
+
+ rcu_read_lock();
+ for (h = 0; h < FIB6_TABLE_HASHSZ; h++) {
+ head = &net->ipv6.fib_table_hash[h];
+ hlist_for_each_entry_rcu(table, head, tb6_hlist) {
+ spin_lock_bh(&table->tb6_lock);
+
+ fib6_gc_table(net, table, gc_args);
+
+ spin_unlock_bh(&table->tb6_lock);
+ }
+ }
+ rcu_read_unlock();
+}
+
void fib6_run_gc(unsigned long expires, struct net *net, bool force)
{
struct fib6_gc_args gc_args;
@@ -2323,7 +2373,7 @@ void fib6_run_gc(unsigned long expires, struct net *net, bool force)
net->ipv6.sysctl.ip6_rt_gc_interval;
gc_args.more = 0;
- fib6_clean_all(net, fib6_age, &gc_args);
+ fib6_gc_all(net, &gc_args);
now = jiffies;
net->ipv6.ip6_rt_last_gc = now;
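The GC rework hinges on the new per-table hlist: only routes carrying RTF_EXPIRES are linked in, so fib6_run_gc() no longer walks the entire trie. A simplified stand-alone sketch of the bookkeeping:

struct demo_route {
	unsigned long		expires;
	struct hlist_node	gc_link;
};

static HLIST_HEAD(demo_gc_list);

static void demo_route_init(struct demo_route *rt)
{
	INIT_HLIST_NODE(&rt->gc_link);	/* as fib6_info_alloc() now does */
}

static void demo_set_expires(struct demo_route *rt, unsigned long expires)
{
	rt->expires = expires;
	if (hlist_unhashed(&rt->gc_link))	/* link at most once */
		hlist_add_head(&rt->gc_link, &demo_gc_list);
}

static void demo_gc(void)	/* caller holds the table lock */
{
	struct demo_route *rt;
	struct hlist_node *n;

	hlist_for_each_entry_safe(rt, n, &demo_gc_list, gc_link)
		if (time_after(jiffies, rt->expires))
			hlist_del_init(&rt->gc_link);	/* then free rt */
}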
@@ -2383,6 +2433,7 @@ static int __net_init fib6_net_init(struct net *net)
net->ipv6.fib6_main_tbl->tb6_root.fn_flags =
RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO;
inet_peer_base_init(&net->ipv6.fib6_main_tbl->tb6_peers);
+ INIT_HLIST_HEAD(&net->ipv6.fib6_main_tbl->tb6_gc_hlist);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
net->ipv6.fib6_local_tbl = kzalloc(sizeof(*net->ipv6.fib6_local_tbl),
@@ -2395,6 +2446,7 @@ static int __net_init fib6_net_init(struct net *net)
net->ipv6.fib6_local_tbl->tb6_root.fn_flags =
RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO;
inet_peer_base_init(&net->ipv6.fib6_local_tbl->tb6_peers);
+ INIT_HLIST_HEAD(&net->ipv6.fib6_local_tbl->tb6_gc_hlist);
#endif
fib6_tables_init(net);
@@ -2444,10 +2496,8 @@ int __init fib6_init(void)
{
int ret = -ENOMEM;
- fib6_node_kmem = kmem_cache_create("fib6_nodes",
- sizeof(struct fib6_node), 0,
- SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT,
- NULL);
+ fib6_node_kmem = KMEM_CACHE(fib6_node,
+ SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT);
if (!fib6_node_kmem)
goto out;
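KMEM_CACHE() removes the boilerplate of the open-coded kmem_cache_create() call: the cache name, object size and alignment are all derived from the struct itself. An illustrative equivalent, with a made-up type:

struct demo_entry {
	u32 key;
	u64 val;
};

static struct kmem_cache *demo_cachep;

static int __init demo_cache_init(void)
{
	/* same as kmem_cache_create("demo_entry",
	 * sizeof(struct demo_entry), __alignof__(struct demo_entry),
	 * SLAB_HWCACHE_ALIGN, NULL)
	 */
	demo_cachep = KMEM_CACHE(demo_entry, SLAB_HWCACHE_ALIGN);
	return demo_cachep ? 0 : -ENOMEM;
}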
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 070d87abf7c0..5e97e0aa8e07 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -1511,6 +1511,7 @@ static int ip6gre_tunnel_init_common(struct net_device *dev)
ip6gre_tnl_init_features(dev);
netdev_hold(dev, &tunnel->dev_tracker, GFP_KERNEL);
+ netdev_lockdep_set_classes(dev);
return 0;
cleanup_dst_cache_init:
@@ -1632,21 +1633,19 @@ err_alloc_dev:
return err;
}
-static void __net_exit ip6gre_exit_batch_net(struct list_head *net_list)
+static void __net_exit ip6gre_exit_batch_rtnl(struct list_head *net_list,
+ struct list_head *dev_to_kill)
{
struct net *net;
- LIST_HEAD(list);
- rtnl_lock();
+ ASSERT_RTNL();
list_for_each_entry(net, net_list, exit_list)
- ip6gre_destroy_tunnels(net, &list);
- unregister_netdevice_many(&list);
- rtnl_unlock();
+ ip6gre_destroy_tunnels(net, dev_to_kill);
}
static struct pernet_operations ip6gre_net_ops = {
.init = ip6gre_init_net,
- .exit_batch = ip6gre_exit_batch_net,
+ .exit_batch_rtnl = ip6gre_exit_batch_rtnl,
.id = &ip6gre_net_id,
.size = sizeof(struct ip6gre_net),
};
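The same ->exit_batch_rtnl conversion repeats for ip6_tunnel and ip6_vti below: the pernet core now takes RTNL once per batch of dying namespaces and performs a single unregister pass afterwards, instead of each subsystem locking and unregistering on its own. A sketch of the hook, with demo_link_ops hypothetical:

static struct rtnl_link_ops demo_link_ops;

static void __net_exit demo_exit_batch_rtnl(struct list_head *net_list,
					    struct list_head *dev_to_kill)
{
	struct net *net;
	struct net_device *dev, *aux;

	ASSERT_RTNL();		/* the core already holds RTNL */
	list_for_each_entry(net, net_list, exit_list)
		for_each_netdev_safe(net, dev, aux)
			if (dev->rtnl_link_ops == &demo_link_ops)
				unregister_netdevice_queue(dev, dev_to_kill);
}

static struct pernet_operations demo_net_ops = {
	.exit_batch_rtnl = demo_exit_batch_rtnl,
};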
@@ -1903,6 +1902,7 @@ static int ip6erspan_tap_init(struct net_device *dev)
ip6erspan_tnl_link_config(tunnel, 1);
netdev_hold(dev, &tunnel->dev_tracker, GFP_KERNEL);
+ netdev_lockdep_set_classes(dev);
return 0;
cleanup_dst_cache_init:
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index b8378814532c..133610a49da6 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -168,9 +168,9 @@ static struct sk_buff *ip6_rcv_core(struct sk_buff *skb, struct net_device *dev,
SKB_DR_SET(reason, NOT_SPECIFIED);
if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL ||
- !idev || unlikely(idev->cnf.disable_ipv6)) {
+ !idev || unlikely(READ_ONCE(idev->cnf.disable_ipv6))) {
__IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
- if (idev && unlikely(idev->cnf.disable_ipv6))
+ if (idev && unlikely(READ_ONCE(idev->cnf.disable_ipv6)))
SKB_DR_SET(reason, IPV6DISABLED);
goto drop;
}
@@ -236,7 +236,7 @@ static struct sk_buff *ip6_rcv_core(struct sk_buff *skb, struct net_device *dev,
if (!ipv6_addr_is_multicast(&hdr->daddr) &&
(skb->pkt_type == PACKET_BROADCAST ||
skb->pkt_type == PACKET_MULTICAST) &&
- idev->cnf.drop_unicast_in_l2_multicast) {
+ READ_ONCE(idev->cnf.drop_unicast_in_l2_multicast)) {
SKB_DR_SET(reason, UNICAST_IN_L2_MULTICAST);
goto err;
}
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index cca64c7809be..b41e35af69ea 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -419,14 +419,6 @@ static int ip4ip6_gro_complete(struct sk_buff *skb, int nhoff)
return inet_gro_complete(skb, nhoff);
}
-static struct packet_offload ipv6_packet_offload __read_mostly = {
- .type = cpu_to_be16(ETH_P_IPV6),
- .callbacks = {
- .gso_segment = ipv6_gso_segment,
- .gro_receive = ipv6_gro_receive,
- .gro_complete = ipv6_gro_complete,
- },
-};
static struct sk_buff *sit_gso_segment(struct sk_buff *skb,
netdev_features_t features)
@@ -486,7 +478,15 @@ static int __init ipv6_offload_init(void)
if (ipv6_exthdrs_offload_init() < 0)
pr_crit("%s: Cannot add EXTHDRS protocol offload\n", __func__);
- dev_add_offload(&ipv6_packet_offload);
+ net_hotdata.ipv6_packet_offload = (struct packet_offload) {
+ .type = cpu_to_be16(ETH_P_IPV6),
+ .callbacks = {
+ .gso_segment = ipv6_gso_segment,
+ .gro_receive = ipv6_gro_receive,
+ .gro_complete = ipv6_gro_complete,
+ },
+ };
+ dev_add_offload(&net_hotdata.ipv6_packet_offload);
inet_add_offload(&sit_offload, IPPROTO_IPV6);
inet6_add_offload(&ip6ip6_offload, IPPROTO_IPV6);
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 31b86fe661aa..02eeca5492cd 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -234,7 +234,7 @@ int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
skb->protocol = htons(ETH_P_IPV6);
skb->dev = dev;
- if (unlikely(idev->cnf.disable_ipv6)) {
+ if (unlikely(READ_ONCE(idev->cnf.disable_ipv6))) {
IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
kfree_skb_reason(skb, SKB_DROP_REASON_IPV6DISABLED);
return 0;
@@ -501,7 +501,7 @@ int ip6_forward(struct sk_buff *skb)
u32 mtu;
idev = __in6_dev_get_safely(dev_get_by_index_rcu(net, IP6CB(skb)->iif));
- if (net->ipv6.devconf_all->forwarding == 0)
+ if (READ_ONCE(net->ipv6.devconf_all->forwarding) == 0)
goto error;
if (skb->pkt_type != PACKET_HOST)
@@ -513,8 +513,8 @@ int ip6_forward(struct sk_buff *skb)
if (skb_warn_if_lro(skb))
goto drop;
- if (!net->ipv6.devconf_all->disable_policy &&
- (!idev || !idev->cnf.disable_policy) &&
+ if (!READ_ONCE(net->ipv6.devconf_all->disable_policy) &&
+ (!idev || !READ_ONCE(idev->cnf.disable_policy)) &&
!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
__IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
goto drop;
@@ -552,7 +552,7 @@ int ip6_forward(struct sk_buff *skb)
}
/* XXX: idev->cnf.proxy_ndp? */
- if (net->ipv6.devconf_all->proxy_ndp &&
+ if (READ_ONCE(net->ipv6.devconf_all->proxy_ndp) &&
pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0)) {
int proxied = ip6_forward_proxy_check(skb);
if (proxied > 0) {
@@ -1925,7 +1925,7 @@ struct sk_buff *__ip6_make_skb(struct sock *sk,
skb->priority = READ_ONCE(sk->sk_priority);
skb->mark = cork->base.mark;
skb->tstamp = cork->base.transmit_time;
-
+ skb->mono_delivery_time = !!skb->tstamp;
ip6_cork_steal_dst(skb, cork);
IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTREQUESTS);
if (proto == IPPROTO_ICMPV6) {
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 9bbabf750a21..e9cc315832cb 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -247,7 +247,6 @@ static void ip6_dev_free(struct net_device *dev)
gro_cells_destroy(&t->gro_cells);
dst_cache_destroy(&t->dst_cache);
- free_percpu(dev->tstats);
}
static int ip6_tnl_create2(struct net_device *dev)
@@ -1756,7 +1755,7 @@ int ip6_tnl_get_iflink(const struct net_device *dev)
{
struct ip6_tnl *t = netdev_priv(dev);
- return t->parms.link;
+ return READ_ONCE(t->parms.link);
}
EXPORT_SYMBOL(ip6_tnl_get_iflink);
@@ -1848,6 +1847,7 @@ static void ip6_tnl_dev_setup(struct net_device *dev)
dev->flags |= IFF_NOARP;
dev->addr_len = sizeof(struct in6_addr);
dev->features |= NETIF_F_LLTX;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
netif_keep_dst(dev);
dev->features |= IPXIPX_FEATURES;
@@ -1873,13 +1873,10 @@ ip6_tnl_dev_init_gen(struct net_device *dev)
t->dev = dev;
t->net = dev_net(dev);
- dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
- if (!dev->tstats)
- return -ENOMEM;
ret = dst_cache_init(&t->dst_cache, GFP_KERNEL);
if (ret)
- goto free_stats;
+ return ret;
ret = gro_cells_init(&t->gro_cells, dev);
if (ret)
@@ -1898,13 +1895,11 @@ ip6_tnl_dev_init_gen(struct net_device *dev)
dev->max_mtu = IP6_MAX_MTU - dev->hard_header_len;
netdev_hold(dev, &t->dev_tracker, GFP_KERNEL);
+ netdev_lockdep_set_classes(dev);
return 0;
destroy_dst:
dst_cache_destroy(&t->dst_cache);
-free_stats:
- free_percpu(dev->tstats);
- dev->tstats = NULL;
return ret;
}
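Dropping the manual dev->tstats management works because declaring a pcpu_stat_type makes the netdev core allocate and free the per-CPU counters around the driver's init/uninit. A sketch of the two driver-side pieces, names illustrative:

static void demo_setup(struct net_device *dev)
{
	/* core allocates dev->tstats before ->ndo_init runs and frees
	 * it on unregister; no netdev_alloc_pcpu_stats()/free_percpu()
	 */
	dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
}

static void demo_count_tx(struct net_device *dev, unsigned int len)
{
	dev_sw_netstats_tx_add(dev, 1, len);	/* updates dev->tstats */
}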
@@ -2282,21 +2277,19 @@ err_alloc_dev:
return err;
}
-static void __net_exit ip6_tnl_exit_batch_net(struct list_head *net_list)
+static void __net_exit ip6_tnl_exit_batch_rtnl(struct list_head *net_list,
+ struct list_head *dev_to_kill)
{
struct net *net;
- LIST_HEAD(list);
- rtnl_lock();
+ ASSERT_RTNL();
list_for_each_entry(net, net_list, exit_list)
- ip6_tnl_destroy_tunnels(net, &list);
- unregister_netdevice_many(&list);
- rtnl_unlock();
+ ip6_tnl_destroy_tunnels(net, dev_to_kill);
}
static struct pernet_operations ip6_tnl_net_ops = {
.init = ip6_tnl_init_net,
- .exit_batch = ip6_tnl_exit_batch_net,
+ .exit_batch_rtnl = ip6_tnl_exit_batch_rtnl,
.id = &ip6_tnl_net_id,
.size = sizeof(struct ip6_tnl_net),
};
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index e550240c85e1..7f4f976aa24a 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -935,6 +935,7 @@ static inline int vti6_dev_init_gen(struct net_device *dev)
if (!dev->tstats)
return -ENOMEM;
netdev_hold(dev, &t->dev_tracker, GFP_KERNEL);
+ netdev_lockdep_set_classes(dev);
return 0;
}
@@ -1174,24 +1175,22 @@ err_alloc_dev:
return err;
}
-static void __net_exit vti6_exit_batch_net(struct list_head *net_list)
+static void __net_exit vti6_exit_batch_rtnl(struct list_head *net_list,
+ struct list_head *dev_to_kill)
{
struct vti6_net *ip6n;
struct net *net;
- LIST_HEAD(list);
- rtnl_lock();
+ ASSERT_RTNL();
list_for_each_entry(net, net_list, exit_list) {
ip6n = net_generic(net, vti6_net_id);
- vti6_destroy_tunnels(ip6n, &list);
+ vti6_destroy_tunnels(ip6n, dev_to_kill);
}
- unregister_netdevice_many(&list);
- rtnl_unlock();
}
static struct pernet_operations vti6_net_ops = {
.init = vti6_init_net,
- .exit_batch = vti6_exit_batch_net,
+ .exit_batch_rtnl = vti6_exit_batch_rtnl,
.id = &vti6_net_id,
.size = sizeof(struct vti6_net),
};
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 9782c180fee6..cb0ee81a068a 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -1373,10 +1373,7 @@ int __init ip6_mr_init(void)
{
int err;
- mrt_cachep = kmem_cache_create("ip6_mrt_cache",
- sizeof(struct mfc6_cache),
- 0, SLAB_HWCACHE_ALIGN,
- NULL);
+ mrt_cachep = KMEM_CACHE(mfc6_cache, SLAB_HWCACHE_ALIGN);
if (!mrt_cachep)
return -ENOMEM;
@@ -2595,7 +2592,9 @@ static int ip6mr_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
static int ip6mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
{
const struct nlmsghdr *nlh = cb->nlh;
- struct fib_dump_filter filter = {};
+ struct fib_dump_filter filter = {
+ .rtnl_held = true,
+ };
int err;
if (cb->strict_check) {
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 56c3c467f9de..d4c28ec1bc51 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -948,6 +948,8 @@ done:
if (optlen < sizeof(int))
goto e_inval;
retv = ip6_ra_control(sk, val);
+ if (retv == 0)
+ inet6_assign_bit(RTALERT, sk, valbool);
break;
case IPV6_FLOWLABEL_MGR:
retv = ipv6_flowlabel_opt(sk, optval, optlen);
@@ -1346,7 +1348,7 @@ int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
}
if (val < 0)
- val = sock_net(sk)->ipv6.devconf_all->hop_limit;
+ val = READ_ONCE(sock_net(sk)->ipv6.devconf_all->hop_limit);
break;
}
@@ -1445,6 +1447,10 @@ int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
val = np->rxopt.bits.recvfragsize;
break;
+ case IPV6_ROUTER_ALERT:
+ val = inet6_test_bit(RTALERT, sk);
+ break;
+
case IPV6_ROUTER_ALERT_ISOLATE:
val = inet6_test_bit(RTALERT_ISOLATE, sk);
break;
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index bc6e0a0bad3c..7ba01d8cfbae 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -159,9 +159,9 @@ static int unsolicited_report_interval(struct inet6_dev *idev)
int iv;
if (mld_in_v1_mode(idev))
- iv = idev->cnf.mldv1_unsolicited_report_interval;
+ iv = READ_ONCE(idev->cnf.mldv1_unsolicited_report_interval);
else
- iv = idev->cnf.mldv2_unsolicited_report_interval;
+ iv = READ_ONCE(idev->cnf.mldv2_unsolicited_report_interval);
return iv > 0 ? iv : 1;
}
@@ -1202,15 +1202,15 @@ static bool mld_marksources(struct ifmcaddr6 *pmc, int nsrcs,
static int mld_force_mld_version(const struct inet6_dev *idev)
{
+ const struct net *net = dev_net(idev->dev);
+ int all_force;
+
+ all_force = READ_ONCE(net->ipv6.devconf_all->force_mld_version);
/* Normally, both are 0 here. If enforcement to a particular is
* being used, individual device enforcement will have a lower
* precedence over 'all' device (.../conf/all/force_mld_version).
*/
-
- if (dev_net(idev->dev)->ipv6.devconf_all->force_mld_version != 0)
- return dev_net(idev->dev)->ipv6.devconf_all->force_mld_version;
- else
- return idev->cnf.force_mld_version;
+ return all_force ?: READ_ONCE(idev->cnf.force_mld_version);
}
static bool mld_in_v2_mode_only(const struct inet6_dev *idev)
@@ -2719,7 +2719,6 @@ void ipv6_mc_down(struct inet6_dev *idev)
/* Should stop work after group drop. or we will
* start work again in mld_ifc_event()
*/
- synchronize_net();
mld_query_stop_work(idev);
mld_report_stop_work(idev);
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index a19999b30bc0..ae134634c323 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -451,7 +451,7 @@ static void ip6_nd_hdr(struct sk_buff *skb,
rcu_read_lock();
idev = __in6_dev_get(skb->dev);
- tclass = idev ? idev->cnf.ndisc_tclass : 0;
+ tclass = idev ? READ_ONCE(idev->cnf.ndisc_tclass) : 0;
rcu_read_unlock();
skb_push(skb, sizeof(*hdr));
@@ -535,7 +535,7 @@ void ndisc_send_na(struct net_device *dev, const struct in6_addr *daddr,
src_addr = solicited_addr;
if (ifp->flags & IFA_F_OPTIMISTIC)
override = false;
- inc_opt |= ifp->idev->cnf.force_tllao;
+ inc_opt |= READ_ONCE(ifp->idev->cnf.force_tllao);
in6_ifa_put(ifp);
} else {
if (ipv6_dev_get_saddr(dev_net(dev), dev, daddr,
@@ -903,8 +903,9 @@ have_ifp:
}
if (ipv6_chk_acast_addr(net, dev, &msg->target) ||
- (idev->cnf.forwarding &&
- (net->ipv6.devconf_all->proxy_ndp || idev->cnf.proxy_ndp) &&
+ (READ_ONCE(idev->cnf.forwarding) &&
+ (READ_ONCE(net->ipv6.devconf_all->proxy_ndp) ||
+ READ_ONCE(idev->cnf.proxy_ndp)) &&
(is_router = pndisc_is_router(&msg->target, dev)) >= 0)) {
if (!(NEIGH_CB(skb)->flags & LOCALLY_ENQUEUED) &&
skb->pkt_type != PACKET_HOST &&
@@ -929,7 +930,7 @@ have_ifp:
}
if (is_router < 0)
- is_router = idev->cnf.forwarding;
+ is_router = READ_ONCE(idev->cnf.forwarding);
if (dad) {
ndisc_send_na(dev, &in6addr_linklocal_allnodes, &msg->target,
@@ -973,7 +974,7 @@ static int accept_untracked_na(struct net_device *dev, struct in6_addr *saddr)
{
struct inet6_dev *idev = __in6_dev_get(dev);
- switch (idev->cnf.accept_untracked_na) {
+ switch (READ_ONCE(idev->cnf.accept_untracked_na)) {
case 0: /* Don't accept untracked na (absent in neighbor cache) */
return 0;
case 1: /* Create new entries from na if currently untracked */
@@ -1024,7 +1025,7 @@ static enum skb_drop_reason ndisc_recv_na(struct sk_buff *skb)
* drop_unsolicited_na takes precedence over accept_untracked_na
*/
if (!msg->icmph.icmp6_solicited && idev &&
- idev->cnf.drop_unsolicited_na)
+ READ_ONCE(idev->cnf.drop_unsolicited_na))
return reason;
if (!ndisc_parse_options(dev, msg->opt, ndoptlen, &ndopts))
@@ -1080,7 +1081,7 @@ static enum skb_drop_reason ndisc_recv_na(struct sk_buff *skb)
* Note that we don't do a (daddr == all-routers-mcast) check.
*/
new_state = msg->icmph.icmp6_solicited ? NUD_REACHABLE : NUD_STALE;
- if (!neigh && lladdr && idev && idev->cnf.forwarding) {
+ if (!neigh && lladdr && idev && READ_ONCE(idev->cnf.forwarding)) {
if (accept_untracked_na(dev, saddr)) {
neigh = neigh_create(&nd_tbl, &msg->target, dev);
new_state = NUD_STALE;
@@ -1100,7 +1101,8 @@ static enum skb_drop_reason ndisc_recv_na(struct sk_buff *skb)
* has already sent a NA to us.
*/
if (lladdr && !memcmp(lladdr, dev->dev_addr, dev->addr_len) &&
- net->ipv6.devconf_all->forwarding && net->ipv6.devconf_all->proxy_ndp &&
+ READ_ONCE(net->ipv6.devconf_all->forwarding) &&
+ READ_ONCE(net->ipv6.devconf_all->proxy_ndp) &&
pneigh_lookup(&nd_tbl, net, &msg->target, dev, 0)) {
/* XXX: idev->cnf.proxy_ndp */
goto out;
@@ -1148,7 +1150,7 @@ static enum skb_drop_reason ndisc_recv_rs(struct sk_buff *skb)
}
/* Don't accept RS if we're not in router mode */
- if (!idev->cnf.forwarding)
+ if (!READ_ONCE(idev->cnf.forwarding))
goto out;
/*
@@ -1237,6 +1239,7 @@ static enum skb_drop_reason ndisc_router_discovery(struct sk_buff *skb)
struct ndisc_options ndopts;
struct fib6_info *rt = NULL;
struct inet6_dev *in6_dev;
+ struct fib6_table *table;
u32 defrtr_usr_metric;
unsigned int pref = 0;
__u32 old_if_flags;
@@ -1317,7 +1320,7 @@ static enum skb_drop_reason ndisc_router_discovery(struct sk_buff *skb)
if (old_if_flags != in6_dev->if_flags)
send_ifinfo_notify = true;
- if (!in6_dev->cnf.accept_ra_defrtr) {
+ if (!READ_ONCE(in6_dev->cnf.accept_ra_defrtr)) {
ND_PRINTK(2, info,
"RA: %s, defrtr is false for dev: %s\n",
__func__, skb->dev->name);
@@ -1325,7 +1328,8 @@ static enum skb_drop_reason ndisc_router_discovery(struct sk_buff *skb)
}
lifetime = ntohs(ra_msg->icmph.icmp6_rt_lifetime);
- if (lifetime != 0 && lifetime < in6_dev->cnf.accept_ra_min_lft) {
+ if (lifetime != 0 &&
+ lifetime < READ_ONCE(in6_dev->cnf.accept_ra_min_lft)) {
ND_PRINTK(2, info,
"RA: router lifetime (%ds) is too short: %s\n",
lifetime, skb->dev->name);
@@ -1336,7 +1340,7 @@ static enum skb_drop_reason ndisc_router_discovery(struct sk_buff *skb)
* accept_ra_from_local is set to true.
*/
net = dev_net(in6_dev->dev);
- if (!in6_dev->cnf.accept_ra_from_local &&
+ if (!READ_ONCE(in6_dev->cnf.accept_ra_from_local) &&
ipv6_chk_addr(net, &ipv6_hdr(skb)->saddr, in6_dev->dev, 0)) {
ND_PRINTK(2, info,
"RA from local address detected on dev: %s: default router ignored\n",
@@ -1348,7 +1352,7 @@ static enum skb_drop_reason ndisc_router_discovery(struct sk_buff *skb)
pref = ra_msg->icmph.icmp6_router_pref;
/* 10b is handled as if it were 00b (medium) */
if (pref == ICMPV6_ROUTER_PREF_INVALID ||
- !in6_dev->cnf.accept_ra_rtr_pref)
+ !READ_ONCE(in6_dev->cnf.accept_ra_rtr_pref))
pref = ICMPV6_ROUTER_PREF_MEDIUM;
#endif
/* routes added from RAs do not use nexthop objects */
@@ -1382,7 +1386,8 @@ static enum skb_drop_reason ndisc_router_discovery(struct sk_buff *skb)
neigh_release(neigh);
rt = rt6_add_dflt_router(net, &ipv6_hdr(skb)->saddr,
- skb->dev, pref, defrtr_usr_metric);
+ skb->dev, pref, defrtr_usr_metric,
+ lifetime);
if (!rt) {
ND_PRINTK(0, err,
"RA: %s failed to add default route\n",
@@ -1409,12 +1414,21 @@ static enum skb_drop_reason ndisc_router_discovery(struct sk_buff *skb)
inet6_rt_notify(RTM_NEWROUTE, rt, &nlinfo, NLM_F_REPLACE);
}
- if (rt)
+ if (rt) {
+ table = rt->fib6_table;
+ spin_lock_bh(&table->tb6_lock);
+
fib6_set_expires(rt, jiffies + (HZ * lifetime));
- if (in6_dev->cnf.accept_ra_min_hop_limit < 256 &&
+ fib6_add_gc_list(rt);
+
+ spin_unlock_bh(&table->tb6_lock);
+ }
+ if (READ_ONCE(in6_dev->cnf.accept_ra_min_hop_limit) < 256 &&
ra_msg->icmph.icmp6_hop_limit) {
- if (in6_dev->cnf.accept_ra_min_hop_limit <= ra_msg->icmph.icmp6_hop_limit) {
- in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit;
+ if (READ_ONCE(in6_dev->cnf.accept_ra_min_hop_limit) <=
+ ra_msg->icmph.icmp6_hop_limit) {
+ WRITE_ONCE(in6_dev->cnf.hop_limit,
+ ra_msg->icmph.icmp6_hop_limit);
fib6_metric_set(rt, RTAX_HOPLIMIT,
ra_msg->icmph.icmp6_hop_limit);
} else {
@@ -1496,7 +1510,7 @@ skip_linkparms:
}
#ifdef CONFIG_IPV6_ROUTE_INFO
- if (!in6_dev->cnf.accept_ra_from_local &&
+ if (!READ_ONCE(in6_dev->cnf.accept_ra_from_local) &&
ipv6_chk_addr(dev_net(in6_dev->dev), &ipv6_hdr(skb)->saddr,
in6_dev->dev, 0)) {
ND_PRINTK(2, info,
@@ -1505,7 +1519,7 @@ skip_linkparms:
goto skip_routeinfo;
}
- if (in6_dev->cnf.accept_ra_rtr_pref && ndopts.nd_opts_ri) {
+ if (READ_ONCE(in6_dev->cnf.accept_ra_rtr_pref) && ndopts.nd_opts_ri) {
struct nd_opt_hdr *p;
for (p = ndopts.nd_opts_ri;
p;
@@ -1517,14 +1531,14 @@ skip_linkparms:
continue;
#endif
if (ri->prefix_len == 0 &&
- !in6_dev->cnf.accept_ra_defrtr)
+ !READ_ONCE(in6_dev->cnf.accept_ra_defrtr))
continue;
if (ri->lifetime != 0 &&
- ntohl(ri->lifetime) < in6_dev->cnf.accept_ra_min_lft)
+ ntohl(ri->lifetime) < READ_ONCE(in6_dev->cnf.accept_ra_min_lft))
continue;
- if (ri->prefix_len < in6_dev->cnf.accept_ra_rt_info_min_plen)
+ if (ri->prefix_len < READ_ONCE(in6_dev->cnf.accept_ra_rt_info_min_plen))
continue;
- if (ri->prefix_len > in6_dev->cnf.accept_ra_rt_info_max_plen)
+ if (ri->prefix_len > READ_ONCE(in6_dev->cnf.accept_ra_rt_info_max_plen))
continue;
rt6_route_rcv(skb->dev, (u8 *)p, (p->nd_opt_len) << 3,
&ipv6_hdr(skb)->saddr);
@@ -1544,7 +1558,7 @@ skip_routeinfo:
}
#endif
- if (in6_dev->cnf.accept_ra_pinfo && ndopts.nd_opts_pi) {
+ if (READ_ONCE(in6_dev->cnf.accept_ra_pinfo) && ndopts.nd_opts_pi) {
struct nd_opt_hdr *p;
for (p = ndopts.nd_opts_pi;
p;
@@ -1555,7 +1569,7 @@ skip_routeinfo:
}
}
- if (ndopts.nd_opts_mtu && in6_dev->cnf.accept_ra_mtu) {
+ if (ndopts.nd_opts_mtu && READ_ONCE(in6_dev->cnf.accept_ra_mtu)) {
__be32 n;
u32 mtu;
@@ -1569,8 +1583,8 @@ skip_routeinfo:
if (mtu < IPV6_MIN_MTU || mtu > skb->dev->mtu) {
ND_PRINTK(2, warn, "RA: invalid mtu: %d\n", mtu);
- } else if (in6_dev->cnf.mtu6 != mtu) {
- in6_dev->cnf.mtu6 = mtu;
+ } else if (READ_ONCE(in6_dev->cnf.mtu6) != mtu) {
+ WRITE_ONCE(in6_dev->cnf.mtu6, mtu);
fib6_metric_set(rt, RTAX_MTU, mtu);
rt6_mtu_change(skb->dev, mtu);
}
@@ -1804,7 +1818,7 @@ static bool ndisc_suppress_frag_ndisc(struct sk_buff *skb)
if (!idev)
return true;
if (IP6CB(skb)->flags & IP6SKB_FRAGMENTED &&
- idev->cnf.suppress_frag_ndisc) {
+ READ_ONCE(idev->cnf.suppress_frag_ndisc)) {
net_warn_ratelimited("Received fragmented ndisc packet. Carefully consider disabling suppress_frag_ndisc.\n");
return true;
}
@@ -1881,8 +1895,8 @@ static int ndisc_netdev_event(struct notifier_block *this, unsigned long event,
idev = in6_dev_get(dev);
if (!idev)
break;
- if (idev->cnf.ndisc_notify ||
- net->ipv6.devconf_all->ndisc_notify)
+ if (READ_ONCE(idev->cnf.ndisc_notify) ||
+ READ_ONCE(net->ipv6.devconf_all->ndisc_notify))
ndisc_send_unsol_na(dev);
in6_dev_put(idev);
break;
@@ -1891,8 +1905,8 @@ static int ndisc_netdev_event(struct notifier_block *this, unsigned long event,
if (!idev)
evict_nocarrier = true;
else {
- evict_nocarrier = idev->cnf.ndisc_evict_nocarrier &&
- net->ipv6.devconf_all->ndisc_evict_nocarrier;
+ evict_nocarrier = READ_ONCE(idev->cnf.ndisc_evict_nocarrier) &&
+ READ_ONCE(net->ipv6.devconf_all->ndisc_evict_nocarrier);
in6_dev_put(idev);
}
@@ -1966,7 +1980,7 @@ int ndisc_ifinfo_sysctl_change(struct ctl_table *ctl, int write, void *buffer,
if (ctl->data == &NEIGH_VAR(idev->nd_parms, BASE_REACHABLE_TIME))
idev->nd_parms->reachable_time =
neigh_rand_reach_time(NEIGH_VAR(idev->nd_parms, BASE_REACHABLE_TIME));
- idev->tstamp = jiffies;
+ WRITE_ONCE(idev->tstamp, jiffies);
inet6_ifinfo_notify(RTM_NEWLINK, idev);
in6_dev_put(idev);
}
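
The ndisc.c hunks above all apply one pattern: inet6_dev->cnf fields that used to be read under RTNL are now read locklessly, so every load gains a READ_ONCE() and every paired store a WRITE_ONCE(), which stops the compiler from tearing or re-loading the value mid-check. A minimal sketch of the pattern, with a hypothetical example_knob standing in for the real cnf members:

	/* Sketch only: example_knob is a stand-in, not a real cnf field. */
	static int example_read_knob(const struct inet6_dev *idev)
	{
		/* one marked load; concurrent writers cannot tear it */
		return READ_ONCE(idev->cnf.example_knob);
	}

	static void example_write_knob(struct inet6_dev *idev, int val)
	{
		/* paired marked store on the sysctl/netlink side */
		WRITE_ONCE(idev->cnf.example_knob, val);
	}
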
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig
index 0ba62f4868f9..f3c8e2d918e1 100644
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -6,6 +6,10 @@
menu "IPv6: Netfilter Configuration"
depends on INET && IPV6 && NETFILTER
+# old sockopt interface and eval loop
+config IP6_NF_IPTABLES_LEGACY
+ tristate
+
config NF_SOCKET_IPV6
tristate "IPv6 socket lookup support"
help
@@ -147,7 +151,7 @@ config IP6_NF_MATCH_MH
config IP6_NF_MATCH_RPFILTER
tristate '"rpfilter" reverse path filter match support'
depends on NETFILTER_ADVANCED
- depends on IP6_NF_MANGLE || IP6_NF_RAW
+ depends on IP6_NF_MANGLE || IP6_NF_RAW || NFT_COMPAT
help
This option allows you to match packets whose replies would
go out via the interface the packet came in.
@@ -186,6 +190,7 @@ config IP6_NF_TARGET_HL
config IP6_NF_FILTER
tristate "Packet filtering"
default m if NETFILTER_ADVANCED=n
+ select IP6_NF_IPTABLES_LEGACY
help
Packet filtering defines a table `filter', which has a series of
rules for simple packet filtering at local input, forwarding and
@@ -195,7 +201,7 @@ config IP6_NF_FILTER
config IP6_NF_TARGET_REJECT
tristate "REJECT target support"
- depends on IP6_NF_FILTER
+ depends on IP6_NF_FILTER || NFT_COMPAT
select NF_REJECT_IPV6
default m if NETFILTER_ADVANCED=n
help
@@ -221,6 +227,7 @@ config IP6_NF_TARGET_SYNPROXY
config IP6_NF_MANGLE
tristate "Packet mangling"
default m if NETFILTER_ADVANCED=n
+ select IP6_NF_IPTABLES_LEGACY
help
This option adds a `mangle' table to ip6tables: see the man page for
ip6tables(8). This table is used for various packet alterations
@@ -230,6 +237,7 @@ config IP6_NF_MANGLE
config IP6_NF_RAW
tristate 'raw table support (required for TRACE)'
+ select IP6_NF_IPTABLES_LEGACY
help
This option adds a `raw' table to ip6tables. This table is the very
first in the netfilter framework and hooks in at the PREROUTING
@@ -243,6 +251,7 @@ config IP6_NF_SECURITY
tristate "Security table"
depends on SECURITY
depends on NETFILTER_ADVANCED
+ select IP6_NF_IPTABLES_LEGACY
help
This option adds a `security' table to ip6tables, for use
with Mandatory Access Control (MAC) policy.
@@ -254,6 +263,7 @@ config IP6_NF_NAT
depends on NF_CONNTRACK
depends on NETFILTER_ADVANCED
select NF_NAT
+ select IP6_NF_IPTABLES_LEGACY
select NETFILTER_XT_NAT
help
This enables the `nat' table in ip6tables. This allows masquerading,
@@ -262,25 +272,23 @@ config IP6_NF_NAT
To compile it as a module, choose M here. If unsure, say N.
-if IP6_NF_NAT
-
config IP6_NF_TARGET_MASQUERADE
tristate "MASQUERADE target support"
select NETFILTER_XT_TARGET_MASQUERADE
+ depends on IP6_NF_NAT
help
This is a backwards-compat option for the user's convenience
(e.g. when running oldconfig). It selects NETFILTER_XT_TARGET_MASQUERADE.
config IP6_NF_TARGET_NPT
tristate "NPT (Network Prefix translation) target support"
+ depends on IP6_NF_NAT || NFT_COMPAT
help
This option adds the `SNPT' and `DNPT' targets, which perform
stateless IPv6-to-IPv6 Network Prefix Translation per RFC 6296.
To compile it as a module, choose M here. If unsure, say N.
-endif # IP6_NF_NAT
-
endif # IP6_NF_IPTABLES
endmenu
diff --git a/net/ipv6/netfilter/Makefile b/net/ipv6/netfilter/Makefile
index b8d6dc9aeeb6..66ce6fa5b2f5 100644
--- a/net/ipv6/netfilter/Makefile
+++ b/net/ipv6/netfilter/Makefile
@@ -4,7 +4,7 @@
#
# Link order matters here.
-obj-$(CONFIG_IP6_NF_IPTABLES) += ip6_tables.o
+obj-$(CONFIG_IP6_NF_IPTABLES_LEGACY) += ip6_tables.o
obj-$(CONFIG_IP6_NF_FILTER) += ip6table_filter.o
obj-$(CONFIG_IP6_NF_MANGLE) += ip6table_mangle.o
obj-$(CONFIG_IP6_NF_RAW) += ip6table_raw.o
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index b2dd48911c8d..1a51a44571c3 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -327,9 +327,9 @@ static int nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *skb,
if (!reasm_data)
goto err;
- payload_len = ((skb->data - skb_network_header(skb)) -
+ payload_len = -skb_network_offset(skb) -
sizeof(struct ipv6hdr) + fq->q.len -
- sizeof(struct frag_hdr));
+ sizeof(struct frag_hdr);
if (payload_len > IPV6_MAXPLEN) {
net_dbg_ratelimited("nf_ct_frag6_reasm: payload len = %d\n",
payload_len);
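
The payload_len rewrite here (and the identical one in reassembly.c below) leans on skb_network_offset() being defined as skb_network_header(skb) - skb->data, so the old pointer difference is exactly its negation. A self-contained sketch of the identity, using a fake skb rather than the real struct:

	/* Userspace sketch of the identity behind the rewrite. */
	#include <assert.h>

	struct fake_skb {
		unsigned char *data;
		unsigned char *network_header;
	};

	static long fake_network_offset(const struct fake_skb *skb)
	{
		return skb->network_header - skb->data;
	}

	int main(void)
	{
		unsigned char buf[64];
		struct fake_skb skb = { .data = buf + 40, .network_header = buf };

		/* skb->data - network_header == -network_offset */
		assert(skb.data - skb.network_header == -fake_network_offset(&skb));
		return 0;
	}
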
diff --git a/net/ipv6/netfilter/nf_reject_ipv6.c b/net/ipv6/netfilter/nf_reject_ipv6.c
index 196dd4ecb5e2..dedee264b8f6 100644
--- a/net/ipv6/netfilter/nf_reject_ipv6.c
+++ b/net/ipv6/netfilter/nf_reject_ipv6.c
@@ -83,7 +83,7 @@ struct sk_buff *nf_reject_skb_v6_tcp_reset(struct net *net,
skb_reserve(nskb, LL_MAX_HEADER);
nip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP,
- net->ipv6.devconf_all->hop_limit);
+ READ_ONCE(net->ipv6.devconf_all->hop_limit));
nf_reject_ip6_tcphdr_put(nskb, oldskb, oth, otcplen);
nip6h->payload_len = htons(nskb->len - sizeof(struct ipv6hdr));
@@ -124,7 +124,7 @@ struct sk_buff *nf_reject_skb_v6_unreach(struct net *net,
skb_reserve(nskb, LL_MAX_HEADER);
nip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_ICMPV6,
- net->ipv6.devconf_all->hop_limit);
+ READ_ONCE(net->ipv6.devconf_all->hop_limit));
skb_reset_transport_header(nskb);
icmp6h = skb_put_zero(nskb, sizeof(struct icmp6hdr));
diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
index b5205311f372..806d4b5dd1e6 100644
--- a/net/ipv6/output_core.c
+++ b/net/ipv6/output_core.c
@@ -111,9 +111,9 @@ int ip6_dst_hoplimit(struct dst_entry *dst)
rcu_read_lock();
idev = __in6_dev_get(dev);
if (idev)
- hoplimit = idev->cnf.hop_limit;
+ hoplimit = READ_ONCE(idev->cnf.hop_limit);
else
- hoplimit = dev_net(dev)->ipv6.devconf_all->hop_limit;
+ hoplimit = READ_ONCE(dev_net(dev)->ipv6.devconf_all->hop_limit);
rcu_read_unlock();
}
return hoplimit;
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 03dbb874c363..ca49e6617afa 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -160,6 +160,13 @@ static bool ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
if (!raw_v6_match(net, sk, nexthdr, daddr, saddr,
inet6_iif(skb), inet6_sdif(skb)))
continue;
+
+ if (atomic_read(&sk->sk_rmem_alloc) >=
+ READ_ONCE(sk->sk_rcvbuf)) {
+ atomic_inc(&sk->sk_drops);
+ continue;
+ }
+
delivered = true;
switch (nexthdr) {
case IPPROTO_ICMPV6:
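
The new block above rejects a matching raw socket before the skb is cloned and charged to it: once sk_rmem_alloc has reached sk_rcvbuf the clone would be freed on arrival anyway, so refusing early saves the allocation and still accounts the drop in sk_drops. The guard in isolation, as a sketch:

	/* Sketch of the early receive-buffer guard added above. */
	static bool raw_sock_has_room(struct sock *sk)
	{
		if (atomic_read(&sk->sk_rmem_alloc) >= READ_ONCE(sk->sk_rcvbuf)) {
			atomic_inc(&sk->sk_drops);	/* visible via sock diag */
			return false;			/* skip this socket */
		}
		return true;
	}
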
@@ -288,8 +295,7 @@ out:
}
static void rawv6_err(struct sock *sk, struct sk_buff *skb,
- struct inet6_skb_parm *opt,
- u8 type, u8 code, int offset, __be32 info)
+ u8 type, u8 code, int offset, __be32 info)
{
bool recverr = inet6_test_bit(RECVERR6, sk);
struct ipv6_pinfo *np = inet6_sk(sk);
@@ -344,7 +350,7 @@ void raw6_icmp_error(struct sk_buff *skb, int nexthdr,
if (!raw_v6_match(net, sk, nexthdr, &ip6h->saddr, &ip6h->daddr,
inet6_iif(skb), inet6_iif(skb)))
continue;
- rawv6_err(sk, skb, NULL, type, code, inner_offset, info);
+ rawv6_err(sk, skb, type, code, inner_offset, info);
}
rcu_read_unlock();
}
@@ -616,7 +622,7 @@ static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length,
skb->priority = READ_ONCE(sk->sk_priority);
skb->mark = sockc->mark;
skb->tstamp = sockc->transmit_time;
-
+ skb->mono_delivery_time = !!skb->tstamp;
skb_put(skb, length);
skb_reset_network_header(skb);
iph = ipv6_hdr(skb);
@@ -935,7 +941,7 @@ do_confirm:
goto done;
}
-static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
+static int rawv6_seticmpfilter(struct sock *sk, int optname,
sockptr_t optval, int optlen)
{
switch (optname) {
@@ -952,7 +958,7 @@ static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
return 0;
}
-static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
+static int rawv6_geticmpfilter(struct sock *sk, int optname,
char __user *optval, int __user *optlen)
{
int len;
@@ -1038,7 +1044,7 @@ static int rawv6_setsockopt(struct sock *sk, int level, int optname,
case SOL_ICMPV6:
if (inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
return -EOPNOTSUPP;
- return rawv6_seticmpfilter(sk, level, optname, optval, optlen);
+ return rawv6_seticmpfilter(sk, optname, optval, optlen);
case SOL_IPV6:
if (optname == IPV6_CHECKSUM ||
optname == IPV6_HDRINCL)
@@ -1099,7 +1105,7 @@ static int rawv6_getsockopt(struct sock *sk, int level, int optname,
case SOL_ICMPV6:
if (inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
return -EOPNOTSUPP;
- return rawv6_geticmpfilter(sk, level, optname, optval, optlen);
+ return rawv6_geticmpfilter(sk, optname, optval, optlen);
case SOL_IPV6:
if (optname == IPV6_CHECKSUM ||
optname == IPV6_HDRINCL)
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 5ebc47da1000..acb4f119e11f 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -272,9 +272,9 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *skb,
if (!reasm_data)
goto out_oom;
- payload_len = ((skb->data - skb_network_header(skb)) -
+ payload_len = -skb_network_offset(skb) -
sizeof(struct ipv6hdr) + fq->q.len -
- sizeof(struct frag_hdr));
+ sizeof(struct frag_hdr);
if (payload_len > IPV6_MAXPLEN)
goto out_oversize;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index ef815ba583a8..1f4b935a0e57 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -645,14 +645,15 @@ static void rt6_probe(struct fib6_nh *fib6_nh)
write_lock_bh(&neigh->lock);
if (!(neigh->nud_state & NUD_VALID) &&
time_after(jiffies,
- neigh->updated + idev->cnf.rtr_probe_interval)) {
+ neigh->updated +
+ READ_ONCE(idev->cnf.rtr_probe_interval))) {
work = kmalloc(sizeof(*work), GFP_ATOMIC);
if (work)
__neigh_set_probe_once(neigh);
}
write_unlock_bh(&neigh->lock);
} else if (time_after(jiffies, last_probe +
- idev->cnf.rtr_probe_interval)) {
+ READ_ONCE(idev->cnf.rtr_probe_interval))) {
work = kmalloc(sizeof(*work), GFP_ATOMIC);
}
@@ -931,6 +932,7 @@ int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
struct net *net = dev_net(dev);
struct route_info *rinfo = (struct route_info *) opt;
struct in6_addr prefix_buf, *prefix;
+ struct fib6_table *table;
unsigned int pref;
unsigned long lifetime;
struct fib6_info *rt;
@@ -989,10 +991,18 @@ int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
(rt->fib6_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);
if (rt) {
- if (!addrconf_finite_timeout(lifetime))
+ table = rt->fib6_table;
+ spin_lock_bh(&table->tb6_lock);
+
+ if (!addrconf_finite_timeout(lifetime)) {
fib6_clean_expires(rt);
- else
+ fib6_remove_gc_list(rt);
+ } else {
fib6_set_expires(rt, jiffies + HZ * lifetime);
+ fib6_add_gc_list(rt);
+ }
+
+ spin_unlock_bh(&table->tb6_lock);
fib6_info_release(rt);
}
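
This hunk and the ndisc_router_discovery() hunk earlier share one invariant: a route's expiry state and its membership on the table's GC list must change together, under tb6_lock, so the fib6 garbage collector only walks routes that can actually expire. Condensed into one helper as a sketch:

	/* Sketch: expiry and GC-list membership change atomically
	 * under the owning table's tb6_lock.
	 */
	static void example_set_lifetime(struct fib6_info *rt,
					 unsigned long lifetime)
	{
		struct fib6_table *table = rt->fib6_table;

		spin_lock_bh(&table->tb6_lock);
		if (!addrconf_finite_timeout(lifetime)) {
			fib6_clean_expires(rt);		/* permanent route */
			fib6_remove_gc_list(rt);
		} else {
			fib6_set_expires(rt, jiffies + HZ * lifetime);
			fib6_add_gc_list(rt);		/* fib6_run_gc() sees it */
		}
		spin_unlock_bh(&table->tb6_lock);
	}
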
@@ -1587,7 +1597,7 @@ static unsigned int fib6_mtu(const struct fib6_result *res)
rcu_read_lock();
idev = __in6_dev_get(dev);
- mtu = idev->cnf.mtu6;
+ mtu = READ_ONCE(idev->cnf.mtu6);
rcu_read_unlock();
}
@@ -2085,12 +2095,12 @@ static void rt6_age_examine_exception(struct rt6_exception_bucket *bucket,
*/
if (!(rt->rt6i_flags & RTF_EXPIRES)) {
if (time_after_eq(now, rt->dst.lastuse + gc_args->timeout)) {
- RT6_TRACE("aging clone %p\n", rt);
+ pr_debug("aging clone %p\n", rt);
rt6_remove_exception(bucket, rt6_ex);
return;
}
} else if (time_after(jiffies, rt->dst.expires)) {
- RT6_TRACE("purging expired route %p\n", rt);
+ pr_debug("purging expired route %p\n", rt);
rt6_remove_exception(bucket, rt6_ex);
return;
}
@@ -2101,8 +2111,8 @@ static void rt6_age_examine_exception(struct rt6_exception_bucket *bucket,
neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
if (!(neigh && (neigh->flags & NTF_ROUTER))) {
- RT6_TRACE("purging route %p via non-router but gateway\n",
- rt);
+ pr_debug("purging route %p via non-router but gateway\n",
+ rt);
rt6_remove_exception(bucket, rt6_ex);
return;
}
@@ -2211,7 +2221,7 @@ struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table,
strict |= flags & RT6_LOOKUP_F_IFACE;
strict |= flags & RT6_LOOKUP_F_IGNORE_LINKSTATE;
- if (net->ipv6.devconf_all->forwarding == 0)
+ if (READ_ONCE(net->ipv6.devconf_all->forwarding) == 0)
strict |= RT6_LOOKUP_F_REACHABLE;
rcu_read_lock();
@@ -3240,8 +3250,8 @@ u32 ip6_mtu_from_fib6(const struct fib6_result *res,
mtu = IPV6_MIN_MTU;
idev = __in6_dev_get(dev);
- if (idev && idev->cnf.mtu6 > mtu)
- mtu = idev->cnf.mtu6;
+ if (idev)
+ mtu = max_t(u32, mtu, READ_ONCE(idev->cnf.mtu6));
}
mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);
@@ -3765,8 +3775,6 @@ static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
if (cfg->fc_flags & RTF_EXPIRES)
fib6_set_expires(rt, jiffies +
clock_t_to_jiffies(cfg->fc_expires));
- else
- fib6_clean_expires(rt);
if (cfg->fc_protocol == RTPROT_UNSPEC)
cfg->fc_protocol = RTPROT_BOOT;
@@ -4142,7 +4150,8 @@ static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_bu
in6_dev = __in6_dev_get(skb->dev);
if (!in6_dev)
return;
- if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_redirects)
+ if (READ_ONCE(in6_dev->cnf.forwarding) ||
+ !READ_ONCE(in6_dev->cnf.accept_redirects))
return;
/* RFC2461 8.1:
@@ -4355,7 +4364,8 @@ struct fib6_info *rt6_add_dflt_router(struct net *net,
const struct in6_addr *gwaddr,
struct net_device *dev,
unsigned int pref,
- u32 defrtr_usr_metric)
+ u32 defrtr_usr_metric,
+ int lifetime)
{
struct fib6_config cfg = {
.fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT,
@@ -4368,6 +4378,7 @@ struct fib6_info *rt6_add_dflt_router(struct net *net,
.fc_nlinfo.portid = 0,
.fc_nlinfo.nlh = NULL,
.fc_nlinfo.nl_net = net,
+ .fc_expires = jiffies_to_clock_t(lifetime * HZ),
};
cfg.fc_gateway = *gwaddr;
@@ -4574,8 +4585,8 @@ struct fib6_info *addrconf_f6i_alloc(struct net *net,
f6i->dst_nocount = true;
if (!anycast &&
- (net->ipv6.devconf_all->disable_policy ||
- idev->cnf.disable_policy))
+ (READ_ONCE(net->ipv6.devconf_all->disable_policy) ||
+ READ_ONCE(idev->cnf.disable_policy)))
f6i->dst_nopolicy = true;
}
diff --git a/net/ipv6/seg6_hmac.c b/net/ipv6/seg6_hmac.c
index d43c50a7310d..861e0366f549 100644
--- a/net/ipv6/seg6_hmac.c
+++ b/net/ipv6/seg6_hmac.c
@@ -241,6 +241,7 @@ bool seg6_hmac_validate_skb(struct sk_buff *skb)
struct sr6_tlv_hmac *tlv;
struct ipv6_sr_hdr *srh;
struct inet6_dev *idev;
+ int require_hmac;
idev = __in6_dev_get(skb->dev);
@@ -248,16 +249,17 @@ bool seg6_hmac_validate_skb(struct sk_buff *skb)
tlv = seg6_get_tlv_hmac(srh);
+ require_hmac = READ_ONCE(idev->cnf.seg6_require_hmac);
/* mandatory check but no tlv */
- if (idev->cnf.seg6_require_hmac > 0 && !tlv)
+ if (require_hmac > 0 && !tlv)
return false;
/* no check */
- if (idev->cnf.seg6_require_hmac < 0)
+ if (require_hmac < 0)
return true;
/* check only if present */
- if (idev->cnf.seg6_require_hmac == 0 && !tlv)
+ if (require_hmac == 0 && !tlv)
return true;
/* now, seg6_require_hmac >= 0 && tlv */
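
seg6_require_hmac is a tri-state knob, and reading it once into a local means all three comparisons judge the same snapshot instead of three racy loads that could each observe a different value. The resulting policy, restated as a sketch:

	/* Sketch of the tri-state policy after the single snapshot:
	 * < 0 never verify, == 0 verify only if a TLV is present,
	 * > 0 the HMAC TLV is mandatory.
	 */
	enum hmac_verdict { HMAC_ACCEPT, HMAC_REJECT, HMAC_VALIDATE };

	static enum hmac_verdict hmac_policy(int require_hmac, bool have_tlv)
	{
		if (require_hmac < 0)
			return HMAC_ACCEPT;	/* checking disabled */
		if (!have_tlv)
			return require_hmac ? HMAC_REJECT : HMAC_ACCEPT;
		return HMAC_VALIDATE;		/* TLV present: verify it */
	}
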
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 5e9f625b76e3..655c9b1a19b8 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -1398,7 +1398,6 @@ static const struct net_device_ops ipip6_netdev_ops = {
.ndo_uninit = ipip6_tunnel_uninit,
.ndo_start_xmit = sit_tunnel_xmit,
.ndo_siocdevprivate = ipip6_tunnel_siocdevprivate,
- .ndo_get_stats64 = dev_get_tstats64,
.ndo_get_iflink = ip_tunnel_get_iflink,
.ndo_tunnel_ctl = ipip6_tunnel_ctl,
};
@@ -1408,7 +1407,6 @@ static void ipip6_dev_free(struct net_device *dev)
struct ip_tunnel *tunnel = netdev_priv(dev);
dst_cache_destroy(&tunnel->dst_cache);
- free_percpu(dev->tstats);
}
#define SIT_FEATURES (NETIF_F_SG | \
@@ -1437,6 +1435,8 @@ static void ipip6_tunnel_setup(struct net_device *dev)
dev->features |= NETIF_F_LLTX;
dev->features |= SIT_FEATURES;
dev->hw_features |= SIT_FEATURES;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
+
}
static int ipip6_tunnel_init(struct net_device *dev)
@@ -1449,17 +1449,13 @@ static int ipip6_tunnel_init(struct net_device *dev)
strcpy(tunnel->parms.name, dev->name);
ipip6_tunnel_bind_dev(dev);
- dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
- if (!dev->tstats)
- return -ENOMEM;
err = dst_cache_init(&tunnel->dst_cache, GFP_KERNEL);
- if (err) {
- free_percpu(dev->tstats);
- dev->tstats = NULL;
+ if (err)
return err;
- }
+
netdev_hold(dev, &tunnel->dev_tracker, GFP_KERNEL);
+ netdev_lockdep_set_classes(dev);
return 0;
}
@@ -1875,22 +1871,19 @@ err_alloc_dev:
return err;
}
-static void __net_exit sit_exit_batch_net(struct list_head *net_list)
+static void __net_exit sit_exit_batch_rtnl(struct list_head *net_list,
+ struct list_head *dev_to_kill)
{
- LIST_HEAD(list);
struct net *net;
- rtnl_lock();
+ ASSERT_RTNL();
list_for_each_entry(net, net_list, exit_list)
- sit_destroy_tunnels(net, &list);
-
- unregister_netdevice_many(&list);
- rtnl_unlock();
+ sit_destroy_tunnels(net, dev_to_kill);
}
static struct pernet_operations sit_net_ops = {
.init = sit_init_net,
- .exit_batch = sit_exit_batch_net,
+ .exit_batch_rtnl = sit_exit_batch_rtnl,
.id = &sit_net_id,
.size = sizeof(struct sit_net),
};
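
The pernet conversion drops the module's own rtnl_lock()/unregister_netdevice_many() sequence in favour of the newer .exit_batch_rtnl hook: the core takes RTNL once for the whole batch of dying namespaces and unregisters everything queued on dev_to_kill after the callback returns. The shape of such a hook, sketched with a hypothetical destroy helper:

	/* Sketch: the pernet core already holds RTNL and will free
	 * whatever the callback queues on @dev_to_kill.
	 * example_destroy_tunnels() stands in for the module's own helper.
	 */
	static void __net_exit example_exit_batch_rtnl(struct list_head *net_list,
						       struct list_head *dev_to_kill)
	{
		struct net *net;

		ASSERT_RTNL();
		list_for_each_entry(net, net_list, exit_list)
			example_destroy_tunnels(net, dev_to_kill);
		/* no rtnl_unlock(), no unregister_netdevice_many() here */
	}
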
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index c8d2ca27220c..8bad0a44a0a6 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -177,24 +177,33 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
struct sock *ret = sk;
__u8 rcv_wscale;
int full_space;
+ SKB_DR(reason);
if (!READ_ONCE(net->ipv4.sysctl_tcp_syncookies) ||
!th->ack || th->rst)
goto out;
- req = cookie_tcp_check(net, sk, skb);
- if (IS_ERR(req))
- goto out;
- if (!req)
+ if (cookie_bpf_ok(skb)) {
+ req = cookie_bpf_check(sk, skb);
+ } else {
+ req = cookie_tcp_check(net, sk, skb);
+ if (IS_ERR(req))
+ goto out;
+ }
+ if (!req) {
+ SKB_DR_SET(reason, NO_SOCKET);
goto out_drop;
+ }
ireq = inet_rsk(req);
ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
- if (security_inet_conn_request(sk, skb, req))
+ if (security_inet_conn_request(sk, skb, req)) {
+ SKB_DR_SET(reason, SECURITY_HOOK);
goto out_free;
+ }
if (ipv6_opt_accepted(sk, skb, &TCP_SKB_CB(skb)->header.h6) ||
np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
@@ -231,8 +240,10 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
security_req_classify_flow(req, flowi6_to_flowi_common(&fl6));
dst = ip6_dst_lookup_flow(net, sk, &fl6, final_p);
- if (IS_ERR(dst))
+ if (IS_ERR(dst)) {
+ SKB_DR_SET(reason, IP_OUTNOROUTES);
goto out_free;
+ }
}
req->rsk_window_clamp = tp->window_clamp ? :dst_metric(dst, RTAX_WINDOW);
@@ -247,14 +258,20 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
ireq->wscale_ok, &rcv_wscale,
dst_metric(dst, RTAX_INITRWND));
- ireq->rcv_wscale = rcv_wscale;
+ if (!req->syncookie)
+ ireq->rcv_wscale = rcv_wscale;
ireq->ecn_ok &= cookie_ecn_ok(net, dst);
ret = tcp_get_cookie_sock(sk, skb, req, dst);
+ if (!ret) {
+ SKB_DR_SET(reason, NO_SOCKET);
+ goto out_drop;
+ }
out:
return ret;
out_free:
reqsk_free(req);
out_drop:
+ kfree_skb_reason(skb, reason);
return NULL;
}
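
cookie_v6_check() now threads an skb drop reason through every failure path: SKB_DR() declares a reason preset to NOT_SPECIFIED, SKB_DR_SET() refines it at each bailout, and the single kfree_skb_reason() at out_drop reports whichever was set. The idiom reduced to a sketch:

	/* Sketch of the drop-reason idiom used above. */
	static struct sock *example_check(struct sock *sk, struct sk_buff *skb,
					  bool security_ok)
	{
		SKB_DR(reason);		/* SKB_DROP_REASON_NOT_SPECIFIED */

		if (!sk) {
			SKB_DR_SET(reason, NO_SOCKET);
			goto drop;
		}
		if (!security_ok) {
			SKB_DR_SET(reason, SECURITY_HOOK);
			goto drop;
		}
		return sk;
	drop:
		kfree_skb_reason(skb, reason);	/* one exit, precise reason */
		return NULL;
	}
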
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 57b25b1fc9d9..3f4cba49e9ee 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -58,6 +58,7 @@
#include <net/timewait_sock.h>
#include <net/inet_common.h>
#include <net/secure_seq.h>
+#include <net/hotdata.h>
#include <net/busy_poll.h>
#include <linux/proc_fs.h>
@@ -1623,7 +1624,6 @@ int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
if (np->rxopt.all)
opt_skb = skb_clone_and_charge_r(skb, sk);
- reason = SKB_DROP_REASON_NOT_SPECIFIED;
if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
struct dst_entry *dst;
@@ -1653,12 +1653,12 @@ int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
if (sk->sk_state == TCP_LISTEN) {
struct sock *nsk = tcp_v6_cookie_check(sk, skb);
- if (!nsk)
- goto discard;
-
if (nsk != sk) {
- if (tcp_child_process(sk, nsk, skb))
- goto reset;
+ if (nsk) {
+ reason = tcp_child_process(sk, nsk, skb);
+ if (reason)
+ goto reset;
+ }
if (opt_skb)
__kfree_skb(opt_skb);
return 0;
@@ -1666,7 +1666,8 @@ int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
} else
sock_rps_save_rxhash(sk, skb);
- if (tcp_rcv_state_process(sk, skb))
+ reason = tcp_rcv_state_process(sk, skb);
+ if (reason)
goto reset;
if (opt_skb)
goto ipv6_pktoptions;
@@ -1856,10 +1857,12 @@ process:
if (nsk == sk) {
reqsk_put(req);
tcp_v6_restore_cb(skb);
- } else if (tcp_child_process(sk, nsk, skb)) {
- tcp_v6_send_reset(nsk, skb);
- goto discard_and_relse;
} else {
+ drop_reason = tcp_child_process(sk, nsk, skb);
+ if (drop_reason) {
+ tcp_v6_send_reset(nsk, skb);
+ goto discard_and_relse;
+ }
sock_put(sk);
return 0;
}
@@ -2365,11 +2368,6 @@ struct proto tcpv6_prot = {
};
EXPORT_SYMBOL_GPL(tcpv6_prot);
-static const struct inet6_protocol tcpv6_protocol = {
- .handler = tcp_v6_rcv,
- .err_handler = tcp_v6_err,
- .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
-};
static struct inet_protosw tcpv6_protosw = {
.type = SOCK_STREAM,
@@ -2406,7 +2404,12 @@ int __init tcpv6_init(void)
{
int ret;
- ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
+ net_hotdata.tcpv6_protocol = (struct inet6_protocol) {
+ .handler = tcp_v6_rcv,
+ .err_handler = tcp_v6_err,
+ .flags = INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
+ };
+ ret = inet6_add_protocol(&net_hotdata.tcpv6_protocol, IPPROTO_TCP);
if (ret)
goto out;
@@ -2431,7 +2434,7 @@ out_tcpv6_pernet_subsys:
out_tcpv6_protosw:
inet6_unregister_protosw(&tcpv6_protosw);
out_tcpv6_protocol:
- inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
+ inet6_del_protocol(&net_hotdata.tcpv6_protocol, IPPROTO_TCP);
goto out;
}
@@ -2439,5 +2442,5 @@ void tcpv6_exit(void)
{
unregister_pernet_subsys(&tcpv6_net_ops);
inet6_unregister_protosw(&tcpv6_protosw);
- inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
+ inet6_del_protocol(&net_hotdata.tcpv6_protocol, IPPROTO_TCP);
}
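
Moving tcpv6_protocol (and, below, the offload and UDP equivalents) into net_hotdata trades a file-local const object for a field filled in at boot, so the structs dereferenced on every packet sit together on the cachelines net_hotdata groups. The one-shot compound-literal initialization, reduced to a generic sketch with illustrative names:

	/* Sketch of the init-time compound-literal assignment; the names
	 * are illustrative, not the real net_hotdata layout.
	 */
	struct example_ops {
		int (*handler)(int val);
		unsigned int flags;
	};

	static struct { struct example_ops proto; } example_hotdata;

	static int example_handler(int val) { return val; }

	static void __init example_init(void)
	{
		example_hotdata.proto = (struct example_ops) {
			.handler = example_handler,
			.flags	 = 0x3,
		};
		/* register &example_hotdata.proto with the stack here */
	}
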
diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c
index bf0c957e4b5e..4b07d1e6c952 100644
--- a/net/ipv6/tcpv6_offload.c
+++ b/net/ipv6/tcpv6_offload.c
@@ -66,15 +66,15 @@ static struct sk_buff *tcp6_gso_segment(struct sk_buff *skb,
return tcp_gso_segment(skb, features);
}
-static const struct net_offload tcpv6_offload = {
- .callbacks = {
- .gso_segment = tcp6_gso_segment,
- .gro_receive = tcp6_gro_receive,
- .gro_complete = tcp6_gro_complete,
- },
-};
int __init tcpv6_offload_init(void)
{
- return inet6_add_offload(&tcpv6_offload, IPPROTO_TCP);
+ net_hotdata.tcpv6_offload = (struct net_offload) {
+ .callbacks = {
+ .gso_segment = tcp6_gso_segment,
+ .gro_receive = tcp6_gro_receive,
+ .gro_complete = tcp6_gro_complete,
+ },
+ };
+ return inet6_add_offload(&net_hotdata.tcpv6_offload, IPPROTO_TCP);
}
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 3f2249b4cd5f..7c1e6469d091 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -79,9 +79,6 @@ u32 udp6_ehashfn(const struct net *net,
const struct in6_addr *faddr,
const __be16 fport)
{
- static u32 udp6_ehash_secret __read_mostly;
- static u32 udp_ipv6_hash_secret __read_mostly;
-
u32 lhash, fhash;
net_get_random_once(&udp6_ehash_secret,
@@ -1101,11 +1098,12 @@ void udp_v6_early_demux(struct sk_buff *skb)
else
return;
- if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
+ if (!sk)
return;
skb->sk = sk;
- skb->destructor = sock_efree;
+ DEBUG_NET_WARN_ON_ONCE(sk_is_refcounted(sk));
+ skb->destructor = sock_pfree;
dst = rcu_dereference(sk->sk_rx_dst);
if (dst)
@@ -1702,11 +1700,6 @@ int udpv6_getsockopt(struct sock *sk, int level, int optname,
return ipv6_getsockopt(sk, level, optname, optval, optlen);
}
-static const struct inet6_protocol udpv6_protocol = {
- .handler = udpv6_rcv,
- .err_handler = udpv6_err,
- .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
-};
/* ------------------------------------------------------------------------ */
#ifdef CONFIG_PROC_FS
@@ -1803,7 +1796,12 @@ int __init udpv6_init(void)
{
int ret;
- ret = inet6_add_protocol(&udpv6_protocol, IPPROTO_UDP);
+ net_hotdata.udpv6_protocol = (struct inet6_protocol) {
+ .handler = udpv6_rcv,
+ .err_handler = udpv6_err,
+ .flags = INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
+ };
+ ret = inet6_add_protocol(&net_hotdata.udpv6_protocol, IPPROTO_UDP);
if (ret)
goto out;
@@ -1814,12 +1812,12 @@ out:
return ret;
out_udpv6_protocol:
- inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
+ inet6_del_protocol(&net_hotdata.udpv6_protocol, IPPROTO_UDP);
goto out;
}
void udpv6_exit(void)
{
inet6_unregister_protosw(&udpv6_protosw);
- inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
+ inet6_del_protocol(&net_hotdata.udpv6_protocol, IPPROTO_UDP);
}
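
The early-demux hunk stops taking a reference on the socket it finds: UDP sockets reached this way are RCU-freed rather than refcounted, so the skb destructor becomes sock_pfree(), which only drops a reference if the socket actually holds one, and the debug assertion documents that here it never does. The attach step as a sketch:

	/* Sketch: attach a non-refcounted (RCU-freed) socket during
	 * early demux; no refcount traffic on the fast path.
	 */
	static void example_early_demux_attach(struct sk_buff *skb,
					       struct sock *sk)
	{
		skb->sk = sk;
		DEBUG_NET_WARN_ON_ONCE(sk_is_refcounted(sk));
		skb->destructor = sock_pfree;	/* no-op for uncounted sk */
	}
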
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
index 6b95ba241ebe..312bcaeea96f 100644
--- a/net/ipv6/udp_offload.c
+++ b/net/ipv6/udp_offload.c
@@ -192,20 +192,19 @@ INDIRECT_CALLABLE_SCOPE int udp6_gro_complete(struct sk_buff *skb, int nhoff)
return udp_gro_complete(skb, nhoff, udp6_lib_lookup_skb);
}
-static const struct net_offload udpv6_offload = {
- .callbacks = {
- .gso_segment = udp6_ufo_fragment,
- .gro_receive = udp6_gro_receive,
- .gro_complete = udp6_gro_complete,
- },
-};
-
-int udpv6_offload_init(void)
+int __init udpv6_offload_init(void)
{
- return inet6_add_offload(&udpv6_offload, IPPROTO_UDP);
+ net_hotdata.udpv6_offload = (struct net_offload) {
+ .callbacks = {
+ .gso_segment = udp6_ufo_fragment,
+ .gro_receive = udp6_gro_receive,
+ .gro_complete = udp6_gro_complete,
+ },
+ };
+ return inet6_add_offload(&net_hotdata.udpv6_offload, IPPROTO_UDP);
}
int udpv6_offload_exit(void)
{
- return inet6_del_offload(&udpv6_offload, IPPROTO_UDP);
+ return inet6_del_offload(&net_hotdata.udpv6_offload, IPPROTO_UDP);
}
diff --git a/net/ipv6/xfrm6_input.c b/net/ipv6/xfrm6_input.c
index 6e36e5047fba..a17d783dc7c0 100644
--- a/net/ipv6/xfrm6_input.c
+++ b/net/ipv6/xfrm6_input.c
@@ -43,7 +43,7 @@ static int xfrm6_transport_finish2(struct net *net, struct sock *sk,
int xfrm6_transport_finish(struct sk_buff *skb, int async)
{
struct xfrm_offload *xo = xfrm_offload(skb);
- int nhlen = skb->data - skb_network_header(skb);
+ int nhlen = -skb_network_offset(skb);
skb_network_header(skb)[IP6CB(skb)->nhoff] =
XFRM_MODE_SKB_CB(skb)->protocol;
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
index f6cb94f82cc3..bf140ef781c1 100644
--- a/net/ipv6/xfrm6_tunnel.c
+++ b/net/ipv6/xfrm6_tunnel.c
@@ -355,10 +355,7 @@ static int __init xfrm6_tunnel_init(void)
{
int rv;
- xfrm6_tunnel_spi_kmem = kmem_cache_create("xfrm6_tunnel_spi",
- sizeof(struct xfrm6_tunnel_spi),
- 0, SLAB_HWCACHE_ALIGN,
- NULL);
+ xfrm6_tunnel_spi_kmem = KMEM_CACHE(xfrm6_tunnel_spi, SLAB_HWCACHE_ALIGN);
if (!xfrm6_tunnel_spi_kmem)
return -ENOMEM;
rv = register_pernet_subsys(&xfrm6_tunnel_net_ops);
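
KMEM_CACHE() derives the cache name, object size and alignment from the struct itself, so the call sites here and in kcm below shrink to one line and can no longer drift out of sync with the struct they cache. Roughly what the macro expands to, simplified from <linux/slab.h>:

	/* Simplified expansion of KMEM_CACHE(struct_name, flags). */
	#define EXAMPLE_KMEM_CACHE(__struct, __flags)				\
		kmem_cache_create(#__struct, sizeof(struct __struct),		\
				  __alignof__(struct __struct), (__flags), NULL)

	/* usage mirroring the hunk above:
	 * cache = EXAMPLE_KMEM_CACHE(xfrm6_tunnel_spi, SLAB_HWCACHE_ALIGN);
	 */
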
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 498a0c35b7bb..4aa1c72e6c49 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -1060,13 +1060,12 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
int i;
/* skip iucv_array lying in the headroom */
- iba[0].address = (u32)(addr_t)skb->data;
+ iba[0].address = (u32)virt_to_phys(skb->data);
iba[0].length = (u32)skb_headlen(skb);
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- iba[i + 1].address =
- (u32)(addr_t)skb_frag_address(frag);
+ iba[i + 1].address = (u32)virt_to_phys(skb_frag_address(frag));
iba[i + 1].length = (u32)skb_frag_size(frag);
}
err = pr_iucv->message_send(iucv->path, &txmsg,
@@ -1162,13 +1161,12 @@ static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
struct iucv_array *iba = (struct iucv_array *)skb->head;
int i;
- iba[0].address = (u32)(addr_t)skb->data;
+ iba[0].address = (u32)virt_to_phys(skb->data);
iba[0].length = (u32)skb_headlen(skb);
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- iba[i + 1].address =
- (u32)(addr_t)skb_frag_address(frag);
+ iba[i + 1].address = (u32)virt_to_phys(skb_frag_address(frag));
iba[i + 1].length = (u32)skb_frag_size(frag);
}
rc = pr_iucv->message_receive(path, msg,
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index b0b3e9c5af44..5b56ae6612dd 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -286,6 +286,7 @@ static union iucv_param *iucv_param_irq[NR_CPUS];
*/
static inline int __iucv_call_b2f0(int command, union iucv_param *parm)
{
+ unsigned long reg1 = virt_to_phys(parm);
int cc;
asm volatile(
@@ -296,7 +297,7 @@ static inline int __iucv_call_b2f0(int command, union iucv_param *parm)
" srl %[cc],28\n"
: [cc] "=&d" (cc), "+m" (*parm)
: [reg0] "d" ((unsigned long)command),
- [reg1] "d" ((unsigned long)parm)
+ [reg1] "d" (reg1)
: "cc", "0", "1");
return cc;
}
@@ -1123,7 +1124,7 @@ int __iucv_message_receive(struct iucv_path *path, struct iucv_message *msg,
parm = iucv_param[smp_processor_id()];
memset(parm, 0, sizeof(union iucv_param));
- parm->db.ipbfadr1 = (u32)(addr_t) buffer;
+ parm->db.ipbfadr1 = (u32)virt_to_phys(buffer);
parm->db.ipbfln1f = (u32) size;
parm->db.ipmsgid = msg->id;
parm->db.ippathid = path->pathid;
@@ -1241,7 +1242,7 @@ int iucv_message_reply(struct iucv_path *path, struct iucv_message *msg,
parm->dpl.iptrgcls = msg->class;
memcpy(parm->dpl.iprmmsg, reply, min_t(size_t, size, 8));
} else {
- parm->db.ipbfadr1 = (u32)(addr_t) reply;
+ parm->db.ipbfadr1 = (u32)virt_to_phys(reply);
parm->db.ipbfln1f = (u32) size;
parm->db.ippathid = path->pathid;
parm->db.ipflags1 = flags;
@@ -1293,7 +1294,7 @@ int __iucv_message_send(struct iucv_path *path, struct iucv_message *msg,
parm->dpl.ipmsgtag = msg->tag;
memcpy(parm->dpl.iprmmsg, buffer, 8);
} else {
- parm->db.ipbfadr1 = (u32)(addr_t) buffer;
+ parm->db.ipbfadr1 = (u32)virt_to_phys(buffer);
parm->db.ipbfln1f = (u32) size;
parm->db.ippathid = path->pathid;
parm->db.ipflags1 = flags | IUCV_IPNORPY;
@@ -1378,7 +1379,7 @@ int iucv_message_send2way(struct iucv_path *path, struct iucv_message *msg,
parm->dpl.iptrgcls = msg->class;
parm->dpl.ipsrccls = srccls;
parm->dpl.ipmsgtag = msg->tag;
- parm->dpl.ipbfadr2 = (u32)(addr_t) answer;
+ parm->dpl.ipbfadr2 = (u32)virt_to_phys(answer);
parm->dpl.ipbfln2f = (u32) asize;
memcpy(parm->dpl.iprmmsg, buffer, 8);
} else {
@@ -1387,9 +1388,9 @@ int iucv_message_send2way(struct iucv_path *path, struct iucv_message *msg,
parm->db.iptrgcls = msg->class;
parm->db.ipsrccls = srccls;
parm->db.ipmsgtag = msg->tag;
- parm->db.ipbfadr1 = (u32)(addr_t) buffer;
+ parm->db.ipbfadr1 = (u32)virt_to_phys(buffer);
parm->db.ipbfln1f = (u32) size;
- parm->db.ipbfadr2 = (u32)(addr_t) answer;
+ parm->db.ipbfadr2 = (u32)virt_to_phys(answer);
parm->db.ipbfln2f = (u32) asize;
}
rc = iucv_call_b2f0(IUCV_SEND, parm);
diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
index 1184d40167b8..2f191e50d4fc 100644
--- a/net/kcm/kcmsock.c
+++ b/net/kcm/kcmsock.c
@@ -627,7 +627,8 @@ retry:
skb = txm->frag_skb;
}
- if (WARN_ON(!skb_shinfo(skb)->nr_frags)) {
+ if (WARN_ON(!skb_shinfo(skb)->nr_frags) ||
+ WARN_ON_ONCE(!skb_frag_page(&skb_shinfo(skb)->frags[0]))) {
ret = -EINVAL;
goto out;
}
@@ -637,8 +638,8 @@ retry:
msize += skb_frag_size(&skb_shinfo(skb)->frags[i]);
iov_iter_bvec(&msg.msg_iter, ITER_SOURCE,
- skb_shinfo(skb)->frags, skb_shinfo(skb)->nr_frags,
- msize);
+ (const struct bio_vec *)skb_shinfo(skb)->frags,
+ skb_shinfo(skb)->nr_frags, msize);
iov_iter_advance(&msg.msg_iter, txm->frag_offset);
do {
@@ -1152,10 +1153,11 @@ static int kcm_getsockopt(struct socket *sock, int level, int optname,
if (get_user(len, optlen))
return -EFAULT;
- len = min_t(unsigned int, len, sizeof(int));
if (len < 0)
return -EINVAL;
+ len = min_t(unsigned int, len, sizeof(int));
+
switch (optname) {
case KCM_RECV_DISABLE:
val = kcm->rx_disabled;
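
This reorder (repeated in pppol2tp below) fixes an unreachable error check: len is a signed int, and clamping it through min_t(unsigned int, ...) first converts a negative value into a huge unsigned one that the clamp then reduces to sizeof(int), so the later len < 0 test could never fire. Testing the sign before the clamp restores the -EINVAL path. The fixed ordering as a sketch:

	/* Sketch: the sign check must precede the unsigned clamp. */
	static int example_opt_len(int len)
	{
		if (len < 0)		/* would be dead after the clamp */
			return -EINVAL;
		len = min_t(unsigned int, len, sizeof(int));
		return len;
	}
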
@@ -1877,15 +1879,11 @@ static int __init kcm_init(void)
{
int err = -ENOMEM;
- kcm_muxp = kmem_cache_create("kcm_mux_cache",
- sizeof(struct kcm_mux), 0,
- SLAB_HWCACHE_ALIGN, NULL);
+ kcm_muxp = KMEM_CACHE(kcm_mux, SLAB_HWCACHE_ALIGN);
if (!kcm_muxp)
goto fail;
- kcm_psockp = kmem_cache_create("kcm_psock_cache",
- sizeof(struct kcm_psock), 0,
- SLAB_HWCACHE_ALIGN, NULL);
+ kcm_psockp = KMEM_CACHE(kcm_psock, SLAB_HWCACHE_ALIGN);
if (!kcm_psockp)
goto fail;
diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
index 25ca89f80414..39e487ccc468 100644
--- a/net/l2tp/l2tp_eth.c
+++ b/net/l2tp/l2tp_eth.c
@@ -100,7 +100,7 @@ static const struct net_device_ops l2tp_eth_netdev_ops = {
.ndo_set_mac_address = eth_mac_addr,
};
-static struct device_type l2tpeth_type = {
+static const struct device_type l2tpeth_type = {
.name = "l2tpeth",
};
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index 9a2a9ed3ba47..970af3983d11 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -478,7 +478,7 @@ static int l2tp_ip_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
rt = ip_route_output_ports(sock_net(sk), fl4, sk,
daddr, inet->inet_saddr,
inet->inet_dport, inet->inet_sport,
- sk->sk_protocol, RT_CONN_FLAGS(sk),
+ sk->sk_protocol, ip_sock_rt_tos(sk),
sk->sk_bound_dev_if);
if (IS_ERR(rt))
goto no_route;
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index f011af6601c9..6146e4e67bbb 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -1356,11 +1356,11 @@ static int pppol2tp_getsockopt(struct socket *sock, int level, int optname,
if (get_user(len, optlen))
return -EFAULT;
- len = min_t(unsigned int, len, sizeof(int));
-
if (len < 0)
return -EINVAL;
+ len = min_t(unsigned int, len, sizeof(int));
+
err = -ENOTCONN;
if (!sk->sk_user_data)
goto end;
diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile
index 4406b4f8f3b9..a33884967f21 100644
--- a/net/mac80211/Makefile
+++ b/net/mac80211/Makefile
@@ -29,7 +29,7 @@ mac80211-y := \
spectmgmt.o \
tx.o \
key.o \
- util.o \
+ util.o parse.o \
wme.o \
chan.o \
trace.o mlme.o \
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index b8a278355e18..21d55dc539f6 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -616,7 +616,7 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
return -EINVAL;
if (!pubsta->deflink.ht_cap.ht_supported &&
- sta->sdata->vif.bss_conf.chandef.chan->band != NL80211_BAND_6GHZ)
+ sta->sdata->vif.bss_conf.chanreq.oper.chan->band != NL80211_BAND_6GHZ)
return -EINVAL;
if (WARN_ON_ONCE(!local->ops->ampdu_action))
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 327682995c92..f03452dc716d 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -886,33 +886,32 @@ static int ieee80211_set_monitor_channel(struct wiphy *wiphy,
{
struct ieee80211_local *local = wiphy_priv(wiphy);
struct ieee80211_sub_if_data *sdata;
- int ret = 0;
+ struct ieee80211_chan_req chanreq = { .oper = *chandef };
+ int ret;
lockdep_assert_wiphy(local->hw.wiphy);
- if (cfg80211_chandef_identical(&local->monitor_chandef, chandef))
+ if (cfg80211_chandef_identical(&local->monitor_chanreq.oper,
+ &chanreq.oper))
return 0;
- if (local->use_chanctx) {
- sdata = wiphy_dereference(local->hw.wiphy,
- local->monitor_sdata);
- if (sdata) {
- ieee80211_link_release_channel(&sdata->deflink);
- ret = ieee80211_link_use_channel(&sdata->deflink,
- chandef,
- IEEE80211_CHANCTX_EXCLUSIVE);
- }
- } else {
- if (local->open_count == local->monitors) {
- local->_oper_chandef = *chandef;
- ieee80211_hw_config(local, 0);
- }
- }
+ sdata = wiphy_dereference(local->hw.wiphy,
+ local->monitor_sdata);
+ if (!sdata)
+ goto done;
- if (ret == 0)
- local->monitor_chandef = *chandef;
+ if (cfg80211_chandef_identical(&sdata->vif.bss_conf.chanreq.oper,
+ &chanreq.oper))
+ return 0;
- return ret;
+ ieee80211_link_release_channel(&sdata->deflink);
+ ret = ieee80211_link_use_channel(&sdata->deflink, &chanreq,
+ IEEE80211_CHANCTX_EXCLUSIVE);
+ if (ret)
+ return ret;
+done:
+ local->monitor_chanreq = chanreq;
+ return 0;
}
static int
@@ -953,7 +952,8 @@ ieee80211_set_probe_resp(struct ieee80211_sub_if_data *sdata,
static int ieee80211_set_fils_discovery(struct ieee80211_sub_if_data *sdata,
struct cfg80211_fils_discovery *params,
struct ieee80211_link_data *link,
- struct ieee80211_bss_conf *link_conf)
+ struct ieee80211_bss_conf *link_conf,
+ u64 *changed)
{
struct fils_discovery_data *new, *old = NULL;
struct ieee80211_fils_discovery *fd;
@@ -980,7 +980,8 @@ static int ieee80211_set_fils_discovery(struct ieee80211_sub_if_data *sdata,
RCU_INIT_POINTER(link->u.ap.fils_discovery, NULL);
}
- return BSS_CHANGED_FILS_DISCOVERY;
+ *changed |= BSS_CHANGED_FILS_DISCOVERY;
+ return 0;
}
static int
@@ -1240,6 +1241,30 @@ ieee80211_assign_beacon(struct ieee80211_sub_if_data *sdata,
return 0;
}
+static u8 ieee80211_num_beaconing_links(struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_link_data *link;
+ u8 link_id, num = 0;
+
+ if (sdata->vif.type != NL80211_IFTYPE_AP &&
+ sdata->vif.type != NL80211_IFTYPE_P2P_GO)
+ return num;
+
+ if (!sdata->vif.valid_links)
+ return num;
+
+ for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) {
+ link = sdata_dereference(sdata->link[link_id], sdata);
+ if (!link)
+ continue;
+
+ if (sdata_dereference(link->u.ap.beacon, sdata))
+ num++;
+ }
+
+ return num;
+}
+
static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_ap_settings *params)
{
@@ -1258,6 +1283,7 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
unsigned int link_id = params->beacon.link_id;
struct ieee80211_link_data *link;
struct ieee80211_bss_conf *link_conf;
+ struct ieee80211_chan_req chanreq = { .oper = params->chandef };
lockdep_assert_wiphy(local->hw.wiphy);
@@ -1341,8 +1367,6 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
return -EOPNOTSUPP;
link_conf->eht_support = true;
- link_conf->eht_puncturing = params->punct_bitmap;
- changed |= BSS_CHANGED_EHT_PUNCTURING;
link_conf->eht_su_beamformer =
params->eht_cap->fixed.phy_cap_info[0] &
@@ -1370,7 +1394,7 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
return err;
}
- err = ieee80211_link_use_channel(link, &params->chandef,
+ err = ieee80211_link_use_channel(link, &chanreq,
IEEE80211_CHANCTX_SHARED);
if (!err)
ieee80211_link_copy_chanctx_to_vlans(link, false);
@@ -1445,10 +1469,9 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
goto error;
err = ieee80211_set_fils_discovery(sdata, &params->fils_discovery,
- link, link_conf);
+ link, link_conf, &changed);
if (err < 0)
goto error;
- changed |= err;
err = ieee80211_set_unsol_bcast_probe_resp(sdata,
&params->unsol_bcast_probe_resp,
@@ -1471,7 +1494,9 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
ieee80211_vif_cfg_change_notify(sdata, BSS_CHANGED_SSID);
ieee80211_link_info_change_notify(sdata, link, changed);
- netif_carrier_on(dev);
+ if (ieee80211_num_beaconing_links(sdata) <= 1)
+ netif_carrier_on(dev);
+
list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
netif_carrier_on(vlan->dev);
@@ -1519,10 +1544,9 @@ static int ieee80211_change_beacon(struct wiphy *wiphy, struct net_device *dev,
return err;
err = ieee80211_set_fils_discovery(sdata, &params->fils_discovery,
- link, link_conf);
+ link, link_conf, &changed);
if (err < 0)
return err;
- changed |= err;
err = ieee80211_set_unsol_bcast_probe_resp(sdata,
&params->unsol_bcast_probe_resp,
@@ -1565,6 +1589,7 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev,
struct ieee80211_link_data *link =
sdata_dereference(sdata->link[link_id], sdata);
struct ieee80211_bss_conf *link_conf = link->conf;
+ LIST_HEAD(keys);
lockdep_assert_wiphy(local->hw.wiphy);
@@ -1582,10 +1607,10 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev,
/* abort any running channel switch or color change */
link_conf->csa_active = false;
link_conf->color_change_active = false;
- if (link->csa_block_tx) {
+ if (sdata->csa_blocked_tx) {
ieee80211_wake_vif_queues(local, sdata,
IEEE80211_QUEUE_STOP_REASON_CSA);
- link->csa_block_tx = false;
+ sdata->csa_blocked_tx = false;
}
ieee80211_free_next_beacon(link);
@@ -1593,7 +1618,9 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev,
/* turn off carrier for this interface and dependent VLANs */
list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
netif_carrier_off(vlan->dev);
- netif_carrier_off(dev);
+
+ if (ieee80211_num_beaconing_links(sdata) <= 1)
+ netif_carrier_off(dev);
/* remove beacon and probe response */
sdata->u.ap.active = false;
@@ -1618,8 +1645,13 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev,
link_conf->ema_ap = false;
link_conf->bssid_indicator = 0;
- __sta_info_flush(sdata, true);
- ieee80211_free_keys(sdata, true);
+ __sta_info_flush(sdata, true, link_id);
+
+ ieee80211_remove_link_keys(link, &keys);
+ if (!list_empty(&keys)) {
+ synchronize_net();
+ ieee80211_free_key_list(local, &keys);
+ }
link_conf->enable_beacon = false;
sdata->beacon_rate_set = false;
@@ -1629,7 +1661,7 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev,
BSS_CHANGED_BEACON_ENABLED);
if (sdata->wdev.cac_started) {
- chandef = link_conf->chandef;
+ chandef = link_conf->chanreq.oper;
wiphy_delayed_work_cancel(wiphy, &link->dfs_cac_timer_work);
cfg80211_cac_event(sdata->dev, &chandef,
NL80211_RADAR_CAC_ABORTED,
@@ -1829,7 +1861,7 @@ static int sta_link_apply_parameters(struct ieee80211_local *local,
if (params->supported_rates &&
params->supported_rates_len) {
- ieee80211_parse_bitrates(link->conf->chandef.width,
+ ieee80211_parse_bitrates(link->conf->chanreq.oper.width,
sband, params->supported_rates,
params->supported_rates_len,
&link_sta->pub->supp_rates[sband->band]);
@@ -1869,7 +1901,7 @@ static int sta_link_apply_parameters(struct ieee80211_local *local,
sband->band);
}
- ieee80211_sta_set_rx_nss(link_sta);
+ ieee80211_sta_init_nss(link_sta);
return ret;
}
@@ -1944,6 +1976,9 @@ static int sta_apply_parameters(struct ieee80211_local *local,
clear_sta_flag(sta, WLAN_STA_TDLS_PEER);
}
+ if (mask & BIT(NL80211_STA_FLAG_SPP_AMSDU))
+ sta->sta.spp_amsdu = set & BIT(NL80211_STA_FLAG_SPP_AMSDU);
+
/* mark TDLS channel switch support, if the AP allows it */
if (test_sta_flag(sta, WLAN_STA_TDLS_PEER) &&
!sdata->deflink.u.mgd.tdls_chan_switch_prohibited &&
@@ -2095,7 +2130,7 @@ static int ieee80211_del_station(struct wiphy *wiphy, struct net_device *dev,
if (params->mac)
return sta_info_destroy_addr_bss(sdata, params->mac);
- sta_info_flush(sdata);
+ sta_info_flush(sdata, params->link_id);
return 0;
}
@@ -2602,6 +2637,7 @@ static int ieee80211_join_mesh(struct wiphy *wiphy, struct net_device *dev,
const struct mesh_setup *setup)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ struct ieee80211_chan_req chanreq = { .oper = setup->chandef };
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
int err;
@@ -2618,7 +2654,7 @@ static int ieee80211_join_mesh(struct wiphy *wiphy, struct net_device *dev,
sdata->deflink.smps_mode = IEEE80211_SMPS_OFF;
sdata->deflink.needed_rx_chains = sdata->local->rx_chains;
- err = ieee80211_link_use_channel(&sdata->deflink, &setup->chandef,
+ err = ieee80211_link_use_channel(&sdata->deflink, &chanreq,
IEEE80211_CHANCTX_SHARED);
if (err)
return err;
@@ -2661,7 +2697,7 @@ static int ieee80211_change_bss(struct wiphy *wiphy,
return -EINVAL;
if (params->basic_rates) {
- if (!ieee80211_parse_bitrates(link->conf->chandef.width,
+ if (!ieee80211_parse_bitrates(link->conf->chanreq.oper.width,
wiphy->bands[sband->band],
params->basic_rates,
params->basic_rates_len,
@@ -3083,7 +3119,7 @@ static int ieee80211_get_tx_power(struct wiphy *wiphy,
if (local->ops->get_txpower)
return drv_get_txpower(local, sdata, dbm);
- if (!local->use_chanctx)
+ if (local->emulate_chanctx)
*dbm = local->hw.conf.power_level;
else
*dbm = sdata->vif.bss_conf.txpower;
@@ -3153,8 +3189,7 @@ int __ieee80211_request_smps_mgd(struct ieee80211_sub_if_data *sdata,
if (WARN_ON_ONCE(sdata->vif.type != NL80211_IFTYPE_STATION))
return -EINVAL;
- if (ieee80211_vif_is_mld(&sdata->vif) &&
- !(sdata->vif.active_links & BIT(link->link_id)))
+ if (!ieee80211_vif_link_active(&sdata->vif, link->link_id))
return 0;
old_req = link->u.mgd.req_smps;
@@ -3176,7 +3211,7 @@ int __ieee80211_request_smps_mgd(struct ieee80211_sub_if_data *sdata,
* the new value until we associate.
*/
if (!sdata->u.mgd.associated ||
- link->conf->chandef.width == NL80211_CHAN_WIDTH_20_NOHT)
+ link->conf->chanreq.oper.width == NL80211_CHAN_WIDTH_20_NOHT)
return 0;
ap = sdata->vif.cfg.ap_addr;
@@ -3207,7 +3242,7 @@ int __ieee80211_request_smps_mgd(struct ieee80211_sub_if_data *sdata,
if (err)
link->u.mgd.req_smps = old_req;
else if (smps_mode != IEEE80211_SMPS_OFF && tdls_peer_found)
- ieee80211_teardown_tdls_peers(sdata);
+ ieee80211_teardown_tdls_peers(link);
return err;
}
@@ -3254,33 +3289,57 @@ static int ieee80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
return 0;
}
+static void ieee80211_set_cqm_rssi_link(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_link_data *link,
+ s32 rssi_thold, u32 rssi_hyst,
+ s32 rssi_low, s32 rssi_high)
+{
+ struct ieee80211_bss_conf *conf;
+
+ if (!link || !link->conf)
+ return;
+
+ conf = link->conf;
+
+ if (rssi_thold && rssi_hyst &&
+ rssi_thold == conf->cqm_rssi_thold &&
+ rssi_hyst == conf->cqm_rssi_hyst)
+ return;
+
+ conf->cqm_rssi_thold = rssi_thold;
+ conf->cqm_rssi_hyst = rssi_hyst;
+ conf->cqm_rssi_low = rssi_low;
+ conf->cqm_rssi_high = rssi_high;
+ link->u.mgd.last_cqm_event_signal = 0;
+
+ if (!ieee80211_vif_link_active(&sdata->vif, link->link_id))
+ return;
+
+ if (sdata->u.mgd.associated &&
+ (sdata->vif.driver_flags & IEEE80211_VIF_SUPPORTS_CQM_RSSI))
+ ieee80211_link_info_change_notify(sdata, link, BSS_CHANGED_CQM);
+}
+
static int ieee80211_set_cqm_rssi_config(struct wiphy *wiphy,
struct net_device *dev,
s32 rssi_thold, u32 rssi_hyst)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct ieee80211_vif *vif = &sdata->vif;
- struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
-
- if (rssi_thold == bss_conf->cqm_rssi_thold &&
- rssi_hyst == bss_conf->cqm_rssi_hyst)
- return 0;
+ int link_id;
- if (sdata->vif.driver_flags & IEEE80211_VIF_BEACON_FILTER &&
- !(sdata->vif.driver_flags & IEEE80211_VIF_SUPPORTS_CQM_RSSI))
+ if (vif->driver_flags & IEEE80211_VIF_BEACON_FILTER &&
+ !(vif->driver_flags & IEEE80211_VIF_SUPPORTS_CQM_RSSI))
return -EOPNOTSUPP;
- bss_conf->cqm_rssi_thold = rssi_thold;
- bss_conf->cqm_rssi_hyst = rssi_hyst;
- bss_conf->cqm_rssi_low = 0;
- bss_conf->cqm_rssi_high = 0;
- sdata->deflink.u.mgd.last_cqm_event_signal = 0;
+ /* For MLD, handle CQM change on all the active links */
+ for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) {
+ struct ieee80211_link_data *link =
+ sdata_dereference(sdata->link[link_id], sdata);
- /* tell the driver upon association, unless already associated */
- if (sdata->u.mgd.associated &&
- sdata->vif.driver_flags & IEEE80211_VIF_SUPPORTS_CQM_RSSI)
- ieee80211_link_info_change_notify(sdata, &sdata->deflink,
- BSS_CHANGED_CQM);
+ ieee80211_set_cqm_rssi_link(sdata, link, rssi_thold, rssi_hyst,
+ 0, 0);
+ }
return 0;
}
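
Both CQM setters now share ieee80211_set_cqm_rssi_link() and loop it over every link slot of the interface instead of touching only deflink, so an MLD gets the thresholds applied on each of its links; absent or inactive links are filtered inside the helper. The iteration shape as a sketch (the threshold parameters are illustrative):

	/* Sketch of the per-MLD-link application used by both setters. */
	static void example_apply_cqm(struct ieee80211_sub_if_data *sdata,
				      s32 thold, u32 hyst, s32 low, s32 high)
	{
		int link_id;

		for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) {
			struct ieee80211_link_data *link =
				sdata_dereference(sdata->link[link_id], sdata);

			/* helper tolerates NULL and inactive links */
			ieee80211_set_cqm_rssi_link(sdata, link, thold, hyst,
						    low, high);
		}
	}
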
@@ -3291,22 +3350,19 @@ static int ieee80211_set_cqm_rssi_range_config(struct wiphy *wiphy,
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct ieee80211_vif *vif = &sdata->vif;
- struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+ int link_id;
- if (sdata->vif.driver_flags & IEEE80211_VIF_BEACON_FILTER)
+ if (vif->driver_flags & IEEE80211_VIF_BEACON_FILTER)
return -EOPNOTSUPP;
- bss_conf->cqm_rssi_low = rssi_low;
- bss_conf->cqm_rssi_high = rssi_high;
- bss_conf->cqm_rssi_thold = 0;
- bss_conf->cqm_rssi_hyst = 0;
- sdata->deflink.u.mgd.last_cqm_event_signal = 0;
+ /* For MLD, handle CQM change on all the active links */
+ for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) {
+ struct ieee80211_link_data *link =
+ sdata_dereference(sdata->link[link_id], sdata);
- /* tell the driver upon association, unless already associated */
- if (sdata->u.mgd.associated &&
- sdata->vif.driver_flags & IEEE80211_VIF_SUPPORTS_CQM_RSSI)
- ieee80211_link_info_change_notify(sdata, &sdata->deflink,
- BSS_CHANGED_CQM);
+ ieee80211_set_cqm_rssi_link(sdata, link, 0, 0,
+ rssi_low, rssi_high);
+ }
return 0;
}
@@ -3331,9 +3387,11 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
* so at a basic rate so that all clients can receive it.
*/
if (rcu_access_pointer(sdata->vif.bss_conf.chanctx_conf) &&
- sdata->vif.bss_conf.chandef.chan) {
+ sdata->vif.bss_conf.chanreq.oper.chan) {
u32 basic_rates = sdata->vif.bss_conf.basic_rates;
- enum nl80211_band band = sdata->vif.bss_conf.chandef.chan->band;
+ enum nl80211_band band;
+
+ band = sdata->vif.bss_conf.chanreq.oper.chan->band;
if (!(mask->control[band].legacy & basic_rates))
return -EINVAL;
@@ -3385,6 +3443,7 @@ static int ieee80211_start_radar_detection(struct wiphy *wiphy,
u32 cac_time_ms)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ struct ieee80211_chan_req chanreq = { .oper = *chandef };
struct ieee80211_local *local = sdata->local;
int err;
@@ -3399,7 +3458,7 @@ static int ieee80211_start_radar_detection(struct wiphy *wiphy,
sdata->deflink.smps_mode = IEEE80211_SMPS_OFF;
sdata->deflink.needed_rx_chains = local->rx_chains;
- err = ieee80211_link_use_channel(&sdata->deflink, chandef,
+ err = ieee80211_link_use_channel(&sdata->deflink, &chanreq,
IEEE80211_CHANCTX_SHARED);
if (err)
goto out_unlock;
@@ -3542,13 +3601,24 @@ cfg80211_beacon_dup(struct cfg80211_beacon_data *beacon)
return new_beacon;
}
-void ieee80211_csa_finish(struct ieee80211_vif *vif)
+void ieee80211_csa_finish(struct ieee80211_vif *vif, unsigned int link_id)
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
struct ieee80211_local *local = sdata->local;
+ struct ieee80211_link_data *link_data;
+
+ if (WARN_ON(link_id >= IEEE80211_MLD_MAX_NUM_LINKS))
+ return;
rcu_read_lock();
+ link_data = rcu_dereference(sdata->link[link_id]);
+ if (WARN_ON(!link_data)) {
+ rcu_read_unlock();
+ return;
+ }
+
+ /* TODO: MBSSID with MLO changes */
if (vif->mbssid_tx_vif == vif) {
/* Trigger ieee80211_csa_finish() on the non-transmitting
* interfaces when channel switch is received on
@@ -3567,7 +3637,7 @@ void ieee80211_csa_finish(struct ieee80211_vif *vif)
&iter->deflink.csa_finalize_work);
}
}
- wiphy_work_queue(local->hw.wiphy, &sdata->deflink.csa_finalize_work);
+ wiphy_work_queue(local->hw.wiphy, &link_data->csa_finalize_work);
rcu_read_unlock();
}
@@ -3579,26 +3649,27 @@ void ieee80211_channel_switch_disconnect(struct ieee80211_vif *vif, bool block_t
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
struct ieee80211_local *local = sdata->local;
- sdata->deflink.csa_block_tx = block_tx;
+ sdata->csa_blocked_tx = block_tx;
sdata_info(sdata, "channel switch failed, disconnecting\n");
wiphy_work_queue(local->hw.wiphy, &ifmgd->csa_connection_drop_work);
}
EXPORT_SYMBOL(ieee80211_channel_switch_disconnect);
-static int ieee80211_set_after_csa_beacon(struct ieee80211_sub_if_data *sdata,
+static int ieee80211_set_after_csa_beacon(struct ieee80211_link_data *link_data,
u64 *changed)
{
+ struct ieee80211_sub_if_data *sdata = link_data->sdata;
int err;
switch (sdata->vif.type) {
case NL80211_IFTYPE_AP:
- if (!sdata->deflink.u.ap.next_beacon)
+ if (!link_data->u.ap.next_beacon)
return -EINVAL;
- err = ieee80211_assign_beacon(sdata, &sdata->deflink,
- sdata->deflink.u.ap.next_beacon,
+ err = ieee80211_assign_beacon(sdata, link_data,
+ link_data->u.ap.next_beacon,
NULL, NULL, changed);
- ieee80211_free_next_beacon(&sdata->deflink);
+ ieee80211_free_next_beacon(link_data);
if (err < 0)
return err;
@@ -3627,6 +3698,7 @@ static int __ieee80211_csa_finalize(struct ieee80211_link_data *link_data)
{
struct ieee80211_sub_if_data *sdata = link_data->sdata;
struct ieee80211_local *local = sdata->local;
+ struct ieee80211_bss_conf *link_conf = link_data->conf;
u64 changed = 0;
int err;
@@ -3648,40 +3720,33 @@ static int __ieee80211_csa_finalize(struct ieee80211_link_data *link_data)
if (link_data->reserved_ready)
return 0;
- return ieee80211_link_use_reserved_context(&sdata->deflink);
+ return ieee80211_link_use_reserved_context(link_data);
}
- if (!cfg80211_chandef_identical(&link_data->conf->chandef,
- &link_data->csa_chandef))
+ if (!cfg80211_chandef_identical(&link_conf->chanreq.oper,
+ &link_data->csa_chanreq.oper))
return -EINVAL;
- sdata->vif.bss_conf.csa_active = false;
+ link_conf->csa_active = false;
- err = ieee80211_set_after_csa_beacon(sdata, &changed);
+ err = ieee80211_set_after_csa_beacon(link_data, &changed);
if (err)
return err;
- if (sdata->vif.bss_conf.eht_puncturing != sdata->vif.bss_conf.csa_punct_bitmap) {
- sdata->vif.bss_conf.eht_puncturing =
- sdata->vif.bss_conf.csa_punct_bitmap;
- changed |= BSS_CHANGED_EHT_PUNCTURING;
- }
-
ieee80211_link_info_change_notify(sdata, link_data, changed);
- if (link_data->csa_block_tx) {
+ if (sdata->csa_blocked_tx) {
ieee80211_wake_vif_queues(local, sdata,
IEEE80211_QUEUE_STOP_REASON_CSA);
- link_data->csa_block_tx = false;
+ sdata->csa_blocked_tx = false;
}
err = drv_post_channel_switch(link_data);
if (err)
return err;
- cfg80211_ch_switch_notify(sdata->dev, &link_data->csa_chandef,
- link_data->link_id,
- link_data->conf->eht_puncturing);
+ cfg80211_ch_switch_notify(sdata->dev, &link_data->csa_chanreq.oper,
+ link_data->link_id);
return 0;
}
@@ -3691,7 +3756,8 @@ static void ieee80211_csa_finalize(struct ieee80211_link_data *link_data)
struct ieee80211_sub_if_data *sdata = link_data->sdata;
if (__ieee80211_csa_finalize(link_data)) {
- sdata_info(sdata, "failed to finalize CSA, disconnecting\n");
+ sdata_info(sdata, "failed to finalize CSA on link %d, disconnecting\n",
+ link_data->link_id);
cfg80211_stop_iface(sdata->local->hw.wiphy, &sdata->wdev,
GFP_KERNEL);
}
@@ -3716,18 +3782,19 @@ void ieee80211_csa_finalize_work(struct wiphy *wiphy, struct wiphy_work *work)
ieee80211_csa_finalize(link);
}
-static int ieee80211_set_csa_beacon(struct ieee80211_sub_if_data *sdata,
+static int ieee80211_set_csa_beacon(struct ieee80211_link_data *link_data,
struct cfg80211_csa_settings *params,
u64 *changed)
{
+ struct ieee80211_sub_if_data *sdata = link_data->sdata;
struct ieee80211_csa_settings csa = {};
int err;
switch (sdata->vif.type) {
case NL80211_IFTYPE_AP:
- sdata->deflink.u.ap.next_beacon =
+ link_data->u.ap.next_beacon =
cfg80211_beacon_dup(&params->beacon_after);
- if (!sdata->deflink.u.ap.next_beacon)
+ if (!link_data->u.ap.next_beacon)
return -ENOMEM;
/*
@@ -3753,7 +3820,7 @@ static int ieee80211_set_csa_beacon(struct ieee80211_sub_if_data *sdata,
IEEE80211_MAX_CNTDWN_COUNTERS_NUM) ||
(params->n_counter_offsets_presp >
IEEE80211_MAX_CNTDWN_COUNTERS_NUM)) {
- ieee80211_free_next_beacon(&sdata->deflink);
+ ieee80211_free_next_beacon(link_data);
return -EINVAL;
}
@@ -3763,11 +3830,11 @@ static int ieee80211_set_csa_beacon(struct ieee80211_sub_if_data *sdata,
csa.n_counter_offsets_presp = params->n_counter_offsets_presp;
csa.count = params->count;
- err = ieee80211_assign_beacon(sdata, &sdata->deflink,
+ err = ieee80211_assign_beacon(sdata, link_data,
&params->beacon_csa, &csa,
NULL, changed);
if (err < 0) {
- ieee80211_free_next_beacon(&sdata->deflink);
+ ieee80211_free_next_beacon(link_data);
return err;
}
@@ -3814,7 +3881,7 @@ static int ieee80211_set_csa_beacon(struct ieee80211_sub_if_data *sdata,
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
/* changes into another band are not supported */
- if (sdata->vif.bss_conf.chandef.chan->band !=
+ if (sdata->vif.bss_conf.chanreq.oper.chan->band !=
params->chandef.chan->band)
return -EINVAL;
@@ -3862,11 +3929,17 @@ __ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_csa_settings *params)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ struct ieee80211_chan_req chanreq = { .oper = params->chandef };
struct ieee80211_local *local = sdata->local;
- struct ieee80211_channel_switch ch_switch;
+ struct ieee80211_channel_switch ch_switch = {
+ .link_id = params->link_id,
+ };
struct ieee80211_chanctx_conf *conf;
struct ieee80211_chanctx *chanctx;
+ struct ieee80211_bss_conf *link_conf;
+ struct ieee80211_link_data *link_data;
u64 changed = 0;
+ u8 link_id = params->link_id;
int err;
lockdep_assert_wiphy(local->hw.wiphy);
@@ -3877,16 +3950,23 @@ __ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
if (sdata->wdev.cac_started)
return -EBUSY;
- if (cfg80211_chandef_identical(&params->chandef,
- &sdata->vif.bss_conf.chandef))
+ if (WARN_ON(link_id >= IEEE80211_MLD_MAX_NUM_LINKS))
+ return -EINVAL;
+
+ link_data = wiphy_dereference(wiphy, sdata->link[link_id]);
+ if (!link_data)
+ return -ENOLINK;
+
+ link_conf = link_data->conf;
+
+ if (chanreq.oper.punctured && !link_conf->eht_support)
return -EINVAL;
/* don't allow another channel switch if one is already active. */
- if (sdata->vif.bss_conf.csa_active)
+ if (link_conf->csa_active)
return -EBUSY;
- conf = rcu_dereference_protected(sdata->vif.bss_conf.chanctx_conf,
- lockdep_is_held(&local->hw.wiphy->mtx));
+ conf = wiphy_dereference(wiphy, link_conf->chanctx_conf);
if (!conf) {
err = -EBUSY;
goto out;
@@ -3903,14 +3983,14 @@ __ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
ch_switch.timestamp = 0;
ch_switch.device_timestamp = 0;
ch_switch.block_tx = params->block_tx;
- ch_switch.chandef = params->chandef;
+ ch_switch.chandef = chanreq.oper;
ch_switch.count = params->count;
err = drv_pre_channel_switch(sdata, &ch_switch);
if (err)
goto out;
- err = ieee80211_link_reserve_chanctx(&sdata->deflink, &params->chandef,
+ err = ieee80211_link_reserve_chanctx(link_data, &chanreq,
chanctx->mode,
params->radar_required);
if (err)
@@ -3919,44 +3999,40 @@ __ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
/* if reservation is invalid then this will fail */
err = ieee80211_check_combinations(sdata, NULL, chanctx->mode, 0);
if (err) {
- ieee80211_link_unreserve_chanctx(&sdata->deflink);
+ ieee80211_link_unreserve_chanctx(link_data);
goto out;
}
/* if there is a color change in progress, abort it */
- if (sdata->vif.bss_conf.color_change_active)
+ if (link_conf->color_change_active)
ieee80211_color_change_abort(sdata);
- err = ieee80211_set_csa_beacon(sdata, params, &changed);
+ err = ieee80211_set_csa_beacon(link_data, params, &changed);
if (err) {
- ieee80211_link_unreserve_chanctx(&sdata->deflink);
+ ieee80211_link_unreserve_chanctx(link_data);
goto out;
}
- if (params->punct_bitmap && !sdata->vif.bss_conf.eht_support)
- goto out;
+ link_data->csa_chanreq = chanreq;
+ link_conf->csa_active = true;
- sdata->deflink.csa_chandef = params->chandef;
- sdata->deflink.csa_block_tx = params->block_tx;
- sdata->vif.bss_conf.csa_active = true;
- sdata->vif.bss_conf.csa_punct_bitmap = params->punct_bitmap;
-
- if (sdata->deflink.csa_block_tx)
+ if (params->block_tx &&
+ !ieee80211_hw_check(&local->hw, HANDLES_QUIET_CSA)) {
ieee80211_stop_vif_queues(local, sdata,
IEEE80211_QUEUE_STOP_REASON_CSA);
+ sdata->csa_blocked_tx = true;
+ }
cfg80211_ch_switch_started_notify(sdata->dev,
- &sdata->deflink.csa_chandef, 0,
- params->count, params->block_tx,
- sdata->vif.bss_conf.csa_punct_bitmap);
+ &link_data->csa_chanreq.oper, 0,
+ params->count, params->block_tx);
if (changed) {
- ieee80211_link_info_change_notify(sdata, &sdata->deflink,
- changed);
- drv_channel_switch_beacon(sdata, &params->chandef);
+ ieee80211_link_info_change_notify(sdata, link_data, changed);
+ drv_channel_switch_beacon(sdata, &link_data->csa_chanreq.oper);
} else {
/* if the beacon didn't change, we can finalize immediately */
- ieee80211_csa_finalize(&sdata->deflink);
+ ieee80211_csa_finalize(link_data);
}
out:
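With the new HANDLES_QUIET_CSA hardware flag (listed in the debugfs hunk further below), mac80211 now stops the vif queues for a block-tx switch only when the driver cannot quiet the air itself. A driver that quiets TX in firmware would advertise this in its setup path; a minimal sketch using only symbols visible in this diff:

	/* sketch: driver init path, before ieee80211_register_hw() */
	ieee80211_hw_set(hw, HANDLES_QUIET_CSA);

With the flag set, the params->block_tx branch above is skipped, sdata->csa_blocked_tx stays false, and __ieee80211_csa_finalize() has no queues to wake.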
@@ -4206,15 +4282,12 @@ static int ieee80211_cfg_get_channel(struct wiphy *wiphy,
chanctx_conf = rcu_dereference(link->conf->chanctx_conf);
if (chanctx_conf) {
- *chandef = link->conf->chandef;
+ *chandef = link->conf->chanreq.oper;
ret = 0;
} else if (local->open_count > 0 &&
local->open_count == local->monitors &&
sdata->vif.type == NL80211_IFTYPE_MONITOR) {
- if (local->use_chanctx)
- *chandef = local->monitor_chandef;
- else
- *chandef = local->_oper_chandef;
+ *chandef = local->monitor_chanreq.oper;
ret = 0;
}
out:
@@ -4262,12 +4335,13 @@ static int ieee80211_set_ap_chanwidth(struct wiphy *wiphy,
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct ieee80211_link_data *link;
+ struct ieee80211_chan_req chanreq = { .oper = *chandef };
int ret;
u64 changed = 0;
link = sdata_dereference(sdata->link[link_id], sdata);
- ret = ieee80211_link_change_bandwidth(link, chandef, &changed);
+ ret = ieee80211_link_change_chanreq(link, &chanreq, &changed);
if (ret == 0)
ieee80211_link_info_change_notify(sdata, link, changed);
@@ -4749,7 +4823,7 @@ EXPORT_SYMBOL_GPL(ieee80211_color_change_finish);
void
ieee80211_obss_color_collision_notify(struct ieee80211_vif *vif,
- u64 color_bitmap, gfp_t gfp)
+ u64 color_bitmap)
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
struct ieee80211_link_data *link = &sdata->deflink;
@@ -4968,6 +5042,17 @@ static int ieee80211_set_hw_timestamp(struct wiphy *wiphy,
return local->ops->set_hw_timestamp(&local->hw, &sdata->vif, hwts);
}
+static int
+ieee80211_set_ttlm(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_ttlm_params *params)
+{
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+
+ lockdep_assert_wiphy(sdata->local->hw.wiphy);
+
+ return ieee80211_req_neg_ttlm(sdata, params);
+}
+
const struct cfg80211_ops mac80211_config_ops = {
.add_virtual_intf = ieee80211_add_iface,
.del_virtual_intf = ieee80211_del_iface,
@@ -5080,4 +5165,5 @@ const struct cfg80211_ops mac80211_config_ops = {
.mod_link_station = ieee80211_mod_link_station,
.del_link_station = ieee80211_del_link_station,
.set_hw_timestamp = ieee80211_set_hw_timestamp,
+ .set_ttlm = ieee80211_set_ttlm,
};
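Throughout this patch a plain chandef gets wrapped into the new struct ieee80211_chan_req, whose .oper member carries the operating channel definition and whose .ap member (used in chan.c below) optionally tracks a wider AP bandwidth. A minimal caller sketch, mirroring ieee80211_set_ap_chanwidth() above and assuming no wider AP chandef is tracked:

	static int example_change_width(struct ieee80211_link_data *link,
					const struct cfg80211_chan_def *chandef)
	{
		struct ieee80211_chan_req chanreq = { .oper = *chandef };
		u64 changed = 0;
		int ret;

		/* .ap stays zeroed: only the operating chandef is requested */
		ret = ieee80211_link_change_chanreq(link, &chanreq, &changed);
		if (ret == 0 && changed)
			ieee80211_link_info_change_notify(link->sdata, link,
							  changed);
		return ret;
	}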
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index ef4c2cebc080..80e4b9784131 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* mac80211 - channel management
- * Copyright 2020 - 2022 Intel Corporation
+ * Copyright 2020 - 2024 Intel Corporation
*/
#include <linux/nl80211.h>
@@ -81,87 +81,122 @@ ieee80211_link_get_chanctx(struct ieee80211_link_data *link)
return container_of(conf, struct ieee80211_chanctx, conf);
}
-static const struct cfg80211_chan_def *
-ieee80211_chanctx_reserved_chandef(struct ieee80211_local *local,
+bool ieee80211_chanreq_identical(const struct ieee80211_chan_req *a,
+ const struct ieee80211_chan_req *b)
+{
+ if (!cfg80211_chandef_identical(&a->oper, &b->oper))
+ return false;
+ if (!a->ap.chan && !b->ap.chan)
+ return true;
+ return cfg80211_chandef_identical(&a->ap, &b->ap);
+}
+
+static const struct ieee80211_chan_req *
+ieee80211_chanreq_compatible(const struct ieee80211_chan_req *a,
+ const struct ieee80211_chan_req *b,
+ struct ieee80211_chan_req *tmp)
+{
+ const struct cfg80211_chan_def *compat;
+
+ if (a->ap.chan && b->ap.chan &&
+ !cfg80211_chandef_identical(&a->ap, &b->ap))
+ return NULL;
+
+ compat = cfg80211_chandef_compatible(&a->oper, &b->oper);
+ if (!compat)
+ return NULL;
+
+ /* Note: later code assumes this always fills & returns tmp if compat */
+ tmp->oper = *compat;
+ tmp->ap = a->ap.chan ? a->ap : b->ap;
+ return tmp;
+}
+
+static const struct ieee80211_chan_req *
+ieee80211_chanctx_compatible(struct ieee80211_chanctx *ctx,
+ const struct ieee80211_chan_req *req,
+ struct ieee80211_chan_req *tmp)
+{
+ const struct ieee80211_chan_req *ret;
+ struct ieee80211_chan_req tmp2;
+
+ *tmp = (struct ieee80211_chan_req){
+ .oper = ctx->conf.def,
+ .ap = ctx->conf.ap,
+ };
+
+ ret = ieee80211_chanreq_compatible(tmp, req, &tmp2);
+ if (!ret)
+ return NULL;
+ *tmp = *ret;
+ return tmp;
+}
+
+static const struct ieee80211_chan_req *
+ieee80211_chanctx_reserved_chanreq(struct ieee80211_local *local,
struct ieee80211_chanctx *ctx,
- const struct cfg80211_chan_def *compat)
+ const struct ieee80211_chan_req *req,
+ struct ieee80211_chan_req *tmp)
{
struct ieee80211_link_data *link;
lockdep_assert_wiphy(local->hw.wiphy);
- list_for_each_entry(link, &ctx->reserved_links,
- reserved_chanctx_list) {
- if (!compat)
- compat = &link->reserved_chandef;
+ if (WARN_ON(!req))
+ return NULL;
- compat = cfg80211_chandef_compatible(&link->reserved_chandef,
- compat);
- if (!compat)
+ list_for_each_entry(link, &ctx->reserved_links, reserved_chanctx_list) {
+ req = ieee80211_chanreq_compatible(&link->reserved, req, tmp);
+ if (!req)
break;
}
- return compat;
+ return req;
}
-static const struct cfg80211_chan_def *
+static const struct ieee80211_chan_req *
ieee80211_chanctx_non_reserved_chandef(struct ieee80211_local *local,
struct ieee80211_chanctx *ctx,
- const struct cfg80211_chan_def *compat)
+ const struct ieee80211_chan_req *compat,
+ struct ieee80211_chan_req *tmp)
{
struct ieee80211_link_data *link;
+ const struct ieee80211_chan_req *comp_def = compat;
lockdep_assert_wiphy(local->hw.wiphy);
- list_for_each_entry(link, &ctx->assigned_links,
- assigned_chanctx_list) {
+ list_for_each_entry(link, &ctx->assigned_links, assigned_chanctx_list) {
struct ieee80211_bss_conf *link_conf = link->conf;
if (link->reserved_chanctx)
continue;
- if (!compat)
- compat = &link_conf->chandef;
-
- compat = cfg80211_chandef_compatible(
- &link_conf->chandef, compat);
- if (!compat)
+ comp_def = ieee80211_chanreq_compatible(&link_conf->chanreq,
+ comp_def, tmp);
+ if (!comp_def)
break;
}
- return compat;
-}
-
-static const struct cfg80211_chan_def *
-ieee80211_chanctx_combined_chandef(struct ieee80211_local *local,
- struct ieee80211_chanctx *ctx,
- const struct cfg80211_chan_def *compat)
-{
- lockdep_assert_wiphy(local->hw.wiphy);
-
- compat = ieee80211_chanctx_reserved_chandef(local, ctx, compat);
- if (!compat)
- return NULL;
-
- compat = ieee80211_chanctx_non_reserved_chandef(local, ctx, compat);
- if (!compat)
- return NULL;
-
- return compat;
+ return comp_def;
}
static bool
-ieee80211_chanctx_can_reserve_chandef(struct ieee80211_local *local,
- struct ieee80211_chanctx *ctx,
- const struct cfg80211_chan_def *def)
+ieee80211_chanctx_can_reserve(struct ieee80211_local *local,
+ struct ieee80211_chanctx *ctx,
+ const struct ieee80211_chan_req *req)
{
+ struct ieee80211_chan_req tmp;
+
lockdep_assert_wiphy(local->hw.wiphy);
- if (ieee80211_chanctx_combined_chandef(local, ctx, def))
- return true;
+ if (!ieee80211_chanctx_reserved_chanreq(local, ctx, req, &tmp))
+ return false;
+
+ if (!ieee80211_chanctx_non_reserved_chandef(local, ctx, req, &tmp))
+ return false;
if (!list_empty(&ctx->reserved_links) &&
- ieee80211_chanctx_reserved_chandef(local, ctx, def))
+ ieee80211_chanctx_reserved_chanreq(local, ctx, req, &tmp))
return true;
return false;
@@ -169,7 +204,7 @@ ieee80211_chanctx_can_reserve_chandef(struct ieee80211_local *local,
static struct ieee80211_chanctx *
ieee80211_find_reservation_chanctx(struct ieee80211_local *local,
- const struct cfg80211_chan_def *chandef,
+ const struct ieee80211_chan_req *chanreq,
enum ieee80211_chanctx_mode mode)
{
struct ieee80211_chanctx *ctx;
@@ -186,8 +221,7 @@ ieee80211_find_reservation_chanctx(struct ieee80211_local *local,
if (ctx->mode == IEEE80211_CHANCTX_EXCLUSIVE)
continue;
- if (!ieee80211_chanctx_can_reserve_chandef(local, ctx,
- chandef))
+ if (!ieee80211_chanctx_can_reserve(local, ctx, chanreq))
continue;
return ctx;
@@ -202,7 +236,7 @@ static enum nl80211_chan_width ieee80211_get_sta_bw(struct sta_info *sta,
enum ieee80211_sta_rx_bandwidth width;
struct link_sta_info *link_sta;
- link_sta = rcu_dereference(sta->link[link_id]);
+ link_sta = wiphy_dereference(sta->local->hw.wiphy, sta->link[link_id]);
/* no effect if this STA has no presence on this link */
if (!link_sta)
@@ -240,9 +274,10 @@ static enum nl80211_chan_width ieee80211_get_sta_bw(struct sta_info *sta,
}
static enum nl80211_chan_width
-ieee80211_get_max_required_bw(struct ieee80211_sub_if_data *sdata,
- unsigned int link_id)
+ieee80211_get_max_required_bw(struct ieee80211_link_data *link)
{
+ struct ieee80211_sub_if_data *sdata = link->sdata;
+ unsigned int link_id = link->link_id;
enum nl80211_chan_width max_bw = NL80211_CHAN_WIDTH_20_NOHT;
struct sta_info *sta;
@@ -258,31 +293,25 @@ ieee80211_get_max_required_bw(struct ieee80211_sub_if_data *sdata,
}
static enum nl80211_chan_width
-ieee80211_get_chanctx_vif_max_required_bw(struct ieee80211_sub_if_data *sdata,
- struct ieee80211_chanctx *ctx,
- struct ieee80211_link_data *rsvd_for)
+ieee80211_get_chanctx_max_required_bw(struct ieee80211_local *local,
+ struct ieee80211_chanctx *ctx,
+ struct ieee80211_link_data *rsvd_for)
{
+ struct ieee80211_sub_if_data *sdata;
+ struct ieee80211_link_data *link;
enum nl80211_chan_width max_bw = NL80211_CHAN_WIDTH_20_NOHT;
- struct ieee80211_vif *vif = &sdata->vif;
- int link_id;
- rcu_read_lock();
- for (link_id = 0; link_id < ARRAY_SIZE(sdata->link); link_id++) {
+ for_each_sdata_link(local, link) {
enum nl80211_chan_width width = NL80211_CHAN_WIDTH_20_NOHT;
- struct ieee80211_link_data *link =
- rcu_dereference(sdata->link[link_id]);
-
- if (!link)
- continue;
if (link != rsvd_for &&
rcu_access_pointer(link->conf->chanctx_conf) != &ctx->conf)
continue;
- switch (vif->type) {
+ switch (link->sdata->vif.type) {
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_AP_VLAN:
- width = ieee80211_get_max_required_bw(sdata, link_id);
+ width = ieee80211_get_max_required_bw(link);
break;
case NL80211_IFTYPE_STATION:
/*
@@ -290,8 +319,8 @@ ieee80211_get_chanctx_vif_max_required_bw(struct ieee80211_sub_if_data *sdata,
* point, so take the width from the chandef, but
* account also for TDLS peers
*/
- width = max(link->conf->chandef.width,
- ieee80211_get_max_required_bw(sdata, link_id));
+ width = max(link->conf->chanreq.oper.width,
+ ieee80211_get_max_required_bw(link));
break;
case NL80211_IFTYPE_P2P_DEVICE:
case NL80211_IFTYPE_NAN:
@@ -299,7 +328,7 @@ ieee80211_get_chanctx_vif_max_required_bw(struct ieee80211_sub_if_data *sdata,
case NL80211_IFTYPE_ADHOC:
case NL80211_IFTYPE_MESH_POINT:
case NL80211_IFTYPE_OCB:
- width = link->conf->chandef.width;
+ width = link->conf->chanreq.oper.width;
break;
case NL80211_IFTYPE_WDS:
case NL80211_IFTYPE_UNSPECIFIED:
@@ -312,40 +341,13 @@ ieee80211_get_chanctx_vif_max_required_bw(struct ieee80211_sub_if_data *sdata,
max_bw = max(max_bw, width);
}
- rcu_read_unlock();
-
- return max_bw;
-}
-
-static enum nl80211_chan_width
-ieee80211_get_chanctx_max_required_bw(struct ieee80211_local *local,
- struct ieee80211_chanctx *ctx,
- struct ieee80211_link_data *rsvd_for)
-{
- struct ieee80211_sub_if_data *sdata;
- enum nl80211_chan_width max_bw = NL80211_CHAN_WIDTH_20_NOHT;
-
- rcu_read_lock();
- list_for_each_entry_rcu(sdata, &local->interfaces, list) {
- enum nl80211_chan_width width;
-
- if (!ieee80211_sdata_running(sdata))
- continue;
-
- width = ieee80211_get_chanctx_vif_max_required_bw(sdata, ctx,
- rsvd_for);
-
- max_bw = max(max_bw, width);
- }
/* use the configured bandwidth in case of monitor interface */
- sdata = rcu_dereference(local->monitor_sdata);
+ sdata = wiphy_dereference(local->hw.wiphy, local->monitor_sdata);
if (sdata &&
rcu_access_pointer(sdata->vif.bss_conf.chanctx_conf) == &ctx->conf)
max_bw = max(max_bw, ctx->conf.def.width);
- rcu_read_unlock();
-
return max_bw;
}
@@ -382,7 +384,7 @@ _ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local,
/* downgrade chandef up to max_bw */
min_def = ctx->conf.def;
while (min_def.width > max_bw)
- ieee80211_chandef_downgrade(&min_def);
+ ieee80211_chandef_downgrade(&min_def, NULL);
if (cfg80211_chandef_identical(&ctx->conf.min_def, &min_def))
return 0;
@@ -395,7 +397,7 @@ _ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local,
}
/* calling this function is assuming that station vif is updated to
- * lates changes by calling ieee80211_link_update_chandef
+ * latest changes by calling ieee80211_link_update_chanreq
*/
static void ieee80211_chan_bw_change(struct ieee80211_local *local,
struct ieee80211_chanctx *ctx,
@@ -475,10 +477,15 @@ void ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local,
static void _ieee80211_change_chanctx(struct ieee80211_local *local,
struct ieee80211_chanctx *ctx,
struct ieee80211_chanctx *old_ctx,
- const struct cfg80211_chan_def *chandef,
+ const struct ieee80211_chan_req *chanreq,
struct ieee80211_link_data *rsvd_for)
{
- u32 changed;
+ const struct cfg80211_chan_def *chandef = &chanreq->oper;
+ struct ieee80211_chan_req ctx_req = {
+ .oper = ctx->conf.def,
+ .ap = ctx->conf.ap,
+ };
+ u32 changed = 0;
/* expected to handle only 20/40/80/160/320 channel widths */
switch (chandef->width) {
@@ -500,47 +507,52 @@ static void _ieee80211_change_chanctx(struct ieee80211_local *local,
*/
ieee80211_chan_bw_change(local, old_ctx, true);
- if (cfg80211_chandef_identical(&ctx->conf.def, chandef)) {
+ if (ieee80211_chanreq_identical(&ctx_req, chanreq)) {
ieee80211_recalc_chanctx_min_def(local, ctx, rsvd_for);
return;
}
- WARN_ON(!cfg80211_chandef_compatible(&ctx->conf.def, chandef));
+ WARN_ON(ieee80211_chanctx_refcount(local, ctx) > 1 &&
+ !cfg80211_chandef_compatible(&ctx->conf.def, &chanreq->oper));
ieee80211_remove_wbrf(local, &ctx->conf.def);
+ if (!cfg80211_chandef_identical(&ctx->conf.def, &chanreq->oper)) {
+ if (ctx->conf.def.width != chanreq->oper.width)
+ changed |= IEEE80211_CHANCTX_CHANGE_WIDTH;
+ if (ctx->conf.def.punctured != chanreq->oper.punctured)
+ changed |= IEEE80211_CHANCTX_CHANGE_PUNCTURING;
+ }
+ if (!cfg80211_chandef_identical(&ctx->conf.ap, &chanreq->ap))
+ changed |= IEEE80211_CHANCTX_CHANGE_AP;
ctx->conf.def = *chandef;
+ ctx->conf.ap = chanreq->ap;
/* check if min chanctx also changed */
- changed = IEEE80211_CHANCTX_CHANGE_WIDTH |
- _ieee80211_recalc_chanctx_min_def(local, ctx, rsvd_for);
+ changed |= _ieee80211_recalc_chanctx_min_def(local, ctx, rsvd_for);
ieee80211_add_wbrf(local, &ctx->conf.def);
drv_change_chanctx(local, ctx, changed);
- if (!local->use_chanctx) {
- local->_oper_chandef = *chandef;
- ieee80211_hw_config(local, 0);
- }
-
- /* check is BW wider */
+ /* check if BW is wider */
ieee80211_chan_bw_change(local, old_ctx, false);
}
static void ieee80211_change_chanctx(struct ieee80211_local *local,
struct ieee80211_chanctx *ctx,
struct ieee80211_chanctx *old_ctx,
- const struct cfg80211_chan_def *chandef)
+ const struct ieee80211_chan_req *chanreq)
{
- _ieee80211_change_chanctx(local, ctx, old_ctx, chandef, NULL);
+ _ieee80211_change_chanctx(local, ctx, old_ctx, chanreq, NULL);
}
static struct ieee80211_chanctx *
ieee80211_find_chanctx(struct ieee80211_local *local,
- const struct cfg80211_chan_def *chandef,
+ const struct ieee80211_chan_req *chanreq,
enum ieee80211_chanctx_mode mode)
{
+ struct ieee80211_chan_req tmp;
struct ieee80211_chanctx *ctx;
lockdep_assert_wiphy(local->hw.wiphy);
@@ -549,7 +561,7 @@ ieee80211_find_chanctx(struct ieee80211_local *local,
return NULL;
list_for_each_entry(ctx, &local->chanctx_list, list) {
- const struct cfg80211_chan_def *compat;
+ const struct ieee80211_chan_req *compat;
if (ctx->replace_state != IEEE80211_CHANCTX_REPLACE_NONE)
continue;
@@ -557,12 +569,12 @@ ieee80211_find_chanctx(struct ieee80211_local *local,
if (ctx->mode == IEEE80211_CHANCTX_EXCLUSIVE)
continue;
- compat = cfg80211_chandef_compatible(&ctx->conf.def, chandef);
+ compat = ieee80211_chanctx_compatible(ctx, chanreq, &tmp);
if (!compat)
continue;
- compat = ieee80211_chanctx_reserved_chandef(local, ctx,
- compat);
+ compat = ieee80211_chanctx_reserved_chanreq(local, ctx,
+ compat, &tmp);
if (!compat)
continue;
@@ -576,26 +588,14 @@ ieee80211_find_chanctx(struct ieee80211_local *local,
bool ieee80211_is_radar_required(struct ieee80211_local *local)
{
- struct ieee80211_sub_if_data *sdata;
+ struct ieee80211_link_data *link;
lockdep_assert_wiphy(local->hw.wiphy);
- rcu_read_lock();
- list_for_each_entry_rcu(sdata, &local->interfaces, list) {
- unsigned int link_id;
-
- for (link_id = 0; link_id < ARRAY_SIZE(sdata->link); link_id++) {
- struct ieee80211_link_data *link;
-
- link = rcu_dereference(sdata->link[link_id]);
-
- if (link && link->radar_required) {
- rcu_read_unlock();
- return true;
- }
- }
+ for_each_sdata_link(local, link) {
+ if (link->radar_required)
+ return true;
}
- rcu_read_unlock();
return false;
}
@@ -605,43 +605,24 @@ ieee80211_chanctx_radar_required(struct ieee80211_local *local,
struct ieee80211_chanctx *ctx)
{
struct ieee80211_chanctx_conf *conf = &ctx->conf;
- struct ieee80211_sub_if_data *sdata;
- bool required = false;
+ struct ieee80211_link_data *link;
lockdep_assert_wiphy(local->hw.wiphy);
- rcu_read_lock();
- list_for_each_entry_rcu(sdata, &local->interfaces, list) {
- unsigned int link_id;
-
- if (!ieee80211_sdata_running(sdata))
+ for_each_sdata_link(local, link) {
+ if (rcu_access_pointer(link->conf->chanctx_conf) != conf)
continue;
- for (link_id = 0; link_id < ARRAY_SIZE(sdata->link); link_id++) {
- struct ieee80211_link_data *link;
-
- link = rcu_dereference(sdata->link[link_id]);
- if (!link)
- continue;
-
- if (rcu_access_pointer(link->conf->chanctx_conf) != conf)
- continue;
- if (!link->radar_required)
- continue;
- required = true;
- break;
- }
-
- if (required)
- break;
+ if (!link->radar_required)
+ continue;
+ return true;
}
- rcu_read_unlock();
- return required;
+ return false;
}
static struct ieee80211_chanctx *
ieee80211_alloc_chanctx(struct ieee80211_local *local,
- const struct cfg80211_chan_def *chandef,
+ const struct ieee80211_chan_req *chanreq,
enum ieee80211_chanctx_mode mode)
{
struct ieee80211_chanctx *ctx;
@@ -654,7 +635,8 @@ ieee80211_alloc_chanctx(struct ieee80211_local *local,
INIT_LIST_HEAD(&ctx->assigned_links);
INIT_LIST_HEAD(&ctx->reserved_links);
- ctx->conf.def = *chandef;
+ ctx->conf.def = chanreq->oper;
+ ctx->conf.ap = chanreq->ap;
ctx->conf.rx_chains_static = 1;
ctx->conf.rx_chains_dynamic = 1;
ctx->mode = mode;
@@ -674,23 +656,15 @@ static int ieee80211_add_chanctx(struct ieee80211_local *local,
ieee80211_add_wbrf(local, &ctx->conf.def);
- if (!local->use_chanctx)
- local->hw.conf.radar_enabled = ctx->conf.radar_enabled;
-
/* turn idle off *before* setting channel -- some drivers need that */
changed = ieee80211_idle_off(local);
if (changed)
ieee80211_hw_config(local, changed);
- if (!local->use_chanctx) {
- local->_oper_chandef = ctx->conf.def;
- ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
- } else {
- err = drv_add_chanctx(local, ctx);
- if (err) {
- ieee80211_recalc_idle(local);
- return err;
- }
+ err = drv_add_chanctx(local, ctx);
+ if (err) {
+ ieee80211_recalc_idle(local);
+ return err;
}
return 0;
@@ -698,7 +672,7 @@ static int ieee80211_add_chanctx(struct ieee80211_local *local,
static struct ieee80211_chanctx *
ieee80211_new_chanctx(struct ieee80211_local *local,
- const struct cfg80211_chan_def *chandef,
+ const struct ieee80211_chan_req *chanreq,
enum ieee80211_chanctx_mode mode)
{
struct ieee80211_chanctx *ctx;
@@ -706,7 +680,7 @@ ieee80211_new_chanctx(struct ieee80211_local *local,
lockdep_assert_wiphy(local->hw.wiphy);
- ctx = ieee80211_alloc_chanctx(local, chandef, mode);
+ ctx = ieee80211_alloc_chanctx(local, chanreq, mode);
if (!ctx)
return ERR_PTR(-ENOMEM);
@@ -725,32 +699,7 @@ static void ieee80211_del_chanctx(struct ieee80211_local *local,
{
lockdep_assert_wiphy(local->hw.wiphy);
- if (!local->use_chanctx) {
- struct cfg80211_chan_def *chandef = &local->_oper_chandef;
- /* S1G doesn't have 20MHz, so get the correct width for the
- * current channel.
- */
- if (chandef->chan->band == NL80211_BAND_S1GHZ)
- chandef->width =
- ieee80211_s1g_channel_width(chandef->chan);
- else
- chandef->width = NL80211_CHAN_WIDTH_20_NOHT;
- chandef->center_freq1 = chandef->chan->center_freq;
- chandef->freq1_offset = chandef->chan->freq_offset;
- chandef->center_freq2 = 0;
-
- /* NOTE: Disabling radar is only valid here for
- * single channel context. To be sure, check it ...
- */
- WARN_ON(local->hw.conf.radar_enabled &&
- !list_empty(&local->chanctx_list));
-
- local->hw.conf.radar_enabled = false;
-
- ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
- } else {
- drv_remove_chanctx(local, ctx);
- }
+ drv_remove_chanctx(local, ctx);
ieee80211_recalc_idle(local);
@@ -773,64 +722,53 @@ void ieee80211_recalc_chanctx_chantype(struct ieee80211_local *local,
struct ieee80211_chanctx *ctx)
{
struct ieee80211_chanctx_conf *conf = &ctx->conf;
- struct ieee80211_sub_if_data *sdata;
- const struct cfg80211_chan_def *compat = NULL;
+ const struct ieee80211_chan_req *compat = NULL;
+ struct ieee80211_link_data *link;
+ struct ieee80211_chan_req tmp;
struct sta_info *sta;
lockdep_assert_wiphy(local->hw.wiphy);
- rcu_read_lock();
- list_for_each_entry_rcu(sdata, &local->interfaces, list) {
- int link_id;
+ for_each_sdata_link(local, link) {
+ struct ieee80211_bss_conf *link_conf;
- if (!ieee80211_sdata_running(sdata))
+ if (link->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
continue;
- if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
- continue;
+ link_conf = link->conf;
- for (link_id = 0; link_id < ARRAY_SIZE(sdata->link); link_id++) {
- struct ieee80211_bss_conf *link_conf =
- rcu_dereference(sdata->vif.link_conf[link_id]);
-
- if (!link_conf)
- continue;
-
- if (rcu_access_pointer(link_conf->chanctx_conf) != conf)
- continue;
+ if (rcu_access_pointer(link_conf->chanctx_conf) != conf)
+ continue;
- if (!compat)
- compat = &link_conf->chandef;
+ if (!compat)
+ compat = &link_conf->chanreq;
- compat = cfg80211_chandef_compatible(&link_conf->chandef,
- compat);
- if (WARN_ON_ONCE(!compat))
- break;
- }
+ compat = ieee80211_chanreq_compatible(&link_conf->chanreq,
+ compat, &tmp);
+ if (WARN_ON_ONCE(!compat))
+ return;
}
- if (WARN_ON_ONCE(!compat)) {
- rcu_read_unlock();
+ if (WARN_ON_ONCE(!compat))
return;
- }
/* TDLS peers can sometimes affect the chandef width */
- list_for_each_entry_rcu(sta, &local->sta_list, list) {
+ list_for_each_entry(sta, &local->sta_list, list) {
+ struct ieee80211_chan_req tdls_chanreq = {};
if (!sta->uploaded ||
!test_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW) ||
!test_sta_flag(sta, WLAN_STA_AUTHORIZED) ||
!sta->tdls_chandef.chan)
continue;
- compat = cfg80211_chandef_compatible(&sta->tdls_chandef,
- compat);
+ tdls_chanreq.oper = sta->tdls_chandef;
+
+ /* note this always fills and returns &tmp if compat */
+ compat = ieee80211_chanreq_compatible(&tdls_chanreq,
+ compat, &tmp);
if (WARN_ON_ONCE(!compat))
- break;
+ return;
}
- rcu_read_unlock();
-
- if (!compat)
- return;
ieee80211_change_chanctx(local, ctx, ctx, compat);
}
@@ -849,11 +787,6 @@ static void ieee80211_recalc_radar_chanctx(struct ieee80211_local *local,
chanctx->conf.radar_enabled = radar_enabled;
- if (!local->use_chanctx) {
- local->hw.conf.radar_enabled = chanctx->conf.radar_enabled;
- ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
- }
-
drv_change_chanctx(local, chanctx, IEEE80211_CHANCTX_CHANGE_RADAR);
}
@@ -924,23 +857,19 @@ void ieee80211_recalc_smps_chanctx(struct ieee80211_local *local,
{
struct ieee80211_sub_if_data *sdata;
u8 rx_chains_static, rx_chains_dynamic;
+ struct ieee80211_link_data *link;
lockdep_assert_wiphy(local->hw.wiphy);
rx_chains_static = 1;
rx_chains_dynamic = 1;
- rcu_read_lock();
- list_for_each_entry_rcu(sdata, &local->interfaces, list) {
+ for_each_sdata_link(local, link) {
u8 needed_static, needed_dynamic;
- unsigned int link_id;
- if (!ieee80211_sdata_running(sdata))
- continue;
-
- switch (sdata->vif.type) {
+ switch (link->sdata->vif.type) {
case NL80211_IFTYPE_STATION:
- if (!sdata->u.mgd.associated)
+ if (!link->sdata->u.mgd.associated)
continue;
break;
case NL80211_IFTYPE_AP:
@@ -952,59 +881,38 @@ void ieee80211_recalc_smps_chanctx(struct ieee80211_local *local,
continue;
}
- for (link_id = 0; link_id < ARRAY_SIZE(sdata->link); link_id++) {
- struct ieee80211_link_data *link;
-
- link = rcu_dereference(sdata->link[link_id]);
-
- if (!link)
- continue;
-
- if (rcu_access_pointer(link->conf->chanctx_conf) != &chanctx->conf)
- continue;
-
- switch (link->smps_mode) {
- default:
- WARN_ONCE(1, "Invalid SMPS mode %d\n",
- link->smps_mode);
- fallthrough;
- case IEEE80211_SMPS_OFF:
- needed_static = link->needed_rx_chains;
- needed_dynamic = link->needed_rx_chains;
- break;
- case IEEE80211_SMPS_DYNAMIC:
- needed_static = 1;
- needed_dynamic = link->needed_rx_chains;
- break;
- case IEEE80211_SMPS_STATIC:
- needed_static = 1;
- needed_dynamic = 1;
- break;
- }
+ if (rcu_access_pointer(link->conf->chanctx_conf) != &chanctx->conf)
+ continue;
- rx_chains_static = max(rx_chains_static, needed_static);
- rx_chains_dynamic = max(rx_chains_dynamic, needed_dynamic);
+ switch (link->smps_mode) {
+ default:
+ WARN_ONCE(1, "Invalid SMPS mode %d\n",
+ link->smps_mode);
+ fallthrough;
+ case IEEE80211_SMPS_OFF:
+ needed_static = link->needed_rx_chains;
+ needed_dynamic = link->needed_rx_chains;
+ break;
+ case IEEE80211_SMPS_DYNAMIC:
+ needed_static = 1;
+ needed_dynamic = link->needed_rx_chains;
+ break;
+ case IEEE80211_SMPS_STATIC:
+ needed_static = 1;
+ needed_dynamic = 1;
+ break;
}
+
+ rx_chains_static = max(rx_chains_static, needed_static);
+ rx_chains_dynamic = max(rx_chains_dynamic, needed_dynamic);
}
/* Disable SMPS for the monitor interface */
- sdata = rcu_dereference(local->monitor_sdata);
+ sdata = wiphy_dereference(local->hw.wiphy, local->monitor_sdata);
if (sdata &&
rcu_access_pointer(sdata->vif.bss_conf.chanctx_conf) == &chanctx->conf)
rx_chains_dynamic = rx_chains_static = local->rx_chains;
- rcu_read_unlock();
-
- if (!local->use_chanctx) {
- if (rx_chains_static > 1)
- local->smps_mode = IEEE80211_SMPS_OFF;
- else if (rx_chains_dynamic > 1)
- local->smps_mode = IEEE80211_SMPS_DYNAMIC;
- else
- local->smps_mode = IEEE80211_SMPS_STATIC;
- ieee80211_hw_config(local, 0);
- }
-
if (rx_chains_static == chanctx->conf.rx_chains_static &&
rx_chains_dynamic == chanctx->conf.rx_chains_dynamic)
return;
@@ -1043,17 +951,16 @@ __ieee80211_link_copy_chanctx_to_vlans(struct ieee80211_link_data *link,
if (clear)
conf = NULL;
- rcu_read_lock();
list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list) {
struct ieee80211_bss_conf *vlan_conf;
- vlan_conf = rcu_dereference(vlan->vif.link_conf[link_id]);
+ vlan_conf = wiphy_dereference(local->hw.wiphy,
+ vlan->vif.link_conf[link_id]);
if (WARN_ON(!vlan_conf))
continue;
rcu_assign_pointer(vlan_conf->chanctx_conf, conf);
}
- rcu_read_unlock();
}
void ieee80211_link_copy_chanctx_to_vlans(struct ieee80211_link_data *link,
@@ -1103,7 +1010,7 @@ int ieee80211_link_unreserve_chanctx(struct ieee80211_link_data *link)
}
int ieee80211_link_reserve_chanctx(struct ieee80211_link_data *link,
- const struct cfg80211_chan_def *chandef,
+ const struct ieee80211_chan_req *chanreq,
enum ieee80211_chanctx_mode mode,
bool radar_required)
{
@@ -1114,13 +1021,13 @@ int ieee80211_link_reserve_chanctx(struct ieee80211_link_data *link,
lockdep_assert_wiphy(local->hw.wiphy);
curr_ctx = ieee80211_link_get_chanctx(link);
- if (curr_ctx && local->use_chanctx && !local->ops->switch_vif_chanctx)
+ if (curr_ctx && !local->ops->switch_vif_chanctx)
return -EOPNOTSUPP;
- new_ctx = ieee80211_find_reservation_chanctx(local, chandef, mode);
+ new_ctx = ieee80211_find_reservation_chanctx(local, chanreq, mode);
if (!new_ctx) {
if (ieee80211_can_create_new_chanctx(local)) {
- new_ctx = ieee80211_new_chanctx(local, chandef, mode);
+ new_ctx = ieee80211_new_chanctx(local, chanreq, mode);
if (IS_ERR(new_ctx))
return PTR_ERR(new_ctx);
} else {
@@ -1174,7 +1081,7 @@ int ieee80211_link_reserve_chanctx(struct ieee80211_link_data *link,
!list_empty(&curr_ctx->reserved_links))
return -EBUSY;
- new_ctx = ieee80211_alloc_chanctx(local, chandef, mode);
+ new_ctx = ieee80211_alloc_chanctx(local, chanreq, mode);
if (!new_ctx)
return -ENOMEM;
@@ -1192,7 +1099,7 @@ int ieee80211_link_reserve_chanctx(struct ieee80211_link_data *link,
list_add(&link->reserved_chanctx_list, &new_ctx->reserved_links);
link->reserved_chanctx = new_ctx;
- link->reserved_chandef = *chandef;
+ link->reserved = *chanreq;
link->reserved_radar_required = radar_required;
link->reserved_ready = false;
@@ -1231,29 +1138,28 @@ ieee80211_link_chanctx_reservation_complete(struct ieee80211_link_data *link)
}
static void
-ieee80211_link_update_chandef(struct ieee80211_link_data *link,
- const struct cfg80211_chan_def *chandef)
+ieee80211_link_update_chanreq(struct ieee80211_link_data *link,
+ const struct ieee80211_chan_req *chanreq)
{
struct ieee80211_sub_if_data *sdata = link->sdata;
unsigned int link_id = link->link_id;
struct ieee80211_sub_if_data *vlan;
- link->conf->chandef = *chandef;
+ link->conf->chanreq = *chanreq;
if (sdata->vif.type != NL80211_IFTYPE_AP)
return;
- rcu_read_lock();
list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list) {
struct ieee80211_bss_conf *vlan_conf;
- vlan_conf = rcu_dereference(vlan->vif.link_conf[link_id]);
+ vlan_conf = wiphy_dereference(sdata->local->hw.wiphy,
+ vlan->vif.link_conf[link_id]);
if (WARN_ON(!vlan_conf))
continue;
- vlan_conf->chandef = *chandef;
+ vlan_conf->chanreq = *chanreq;
}
- rcu_read_unlock();
}
static int
@@ -1264,7 +1170,8 @@ ieee80211_link_use_reserved_reassign(struct ieee80211_link_data *link)
struct ieee80211_local *local = sdata->local;
struct ieee80211_vif_chanctx_switch vif_chsw[1] = {};
struct ieee80211_chanctx *old_ctx, *new_ctx;
- const struct cfg80211_chan_def *chandef;
+ const struct ieee80211_chan_req *chanreq;
+ struct ieee80211_chan_req tmp;
u64 changed = 0;
int err;
@@ -1286,17 +1193,18 @@ ieee80211_link_use_reserved_reassign(struct ieee80211_link_data *link)
IEEE80211_CHANCTX_REPLACES_OTHER))
return -EINVAL;
- chandef = ieee80211_chanctx_non_reserved_chandef(local, new_ctx,
- &link->reserved_chandef);
- if (WARN_ON(!chandef))
+ chanreq = ieee80211_chanctx_non_reserved_chandef(local, new_ctx,
+ &link->reserved,
+ &tmp);
+ if (WARN_ON(!chanreq))
return -EINVAL;
- if (link_conf->chandef.width != link->reserved_chandef.width)
+ if (link_conf->chanreq.oper.width != link->reserved.oper.width)
changed = BSS_CHANGED_BANDWIDTH;
- ieee80211_link_update_chandef(link, &link->reserved_chandef);
+ ieee80211_link_update_chanreq(link, &link->reserved);
- _ieee80211_change_chanctx(local, new_ctx, old_ctx, chandef, link);
+ _ieee80211_change_chanctx(local, new_ctx, old_ctx, chanreq, link);
vif_chsw[0].vif = &sdata->vif;
vif_chsw[0].old_ctx = &old_ctx->conf;
@@ -1344,7 +1252,8 @@ ieee80211_link_use_reserved_assign(struct ieee80211_link_data *link)
struct ieee80211_sub_if_data *sdata = link->sdata;
struct ieee80211_local *local = sdata->local;
struct ieee80211_chanctx *old_ctx, *new_ctx;
- const struct cfg80211_chan_def *chandef;
+ const struct ieee80211_chan_req *chanreq;
+ struct ieee80211_chan_req tmp;
int err;
old_ctx = ieee80211_link_get_chanctx(link);
@@ -1363,12 +1272,13 @@ ieee80211_link_use_reserved_assign(struct ieee80211_link_data *link)
IEEE80211_CHANCTX_REPLACES_OTHER))
return -EINVAL;
- chandef = ieee80211_chanctx_non_reserved_chandef(local, new_ctx,
- &link->reserved_chandef);
- if (WARN_ON(!chandef))
+ chanreq = ieee80211_chanctx_non_reserved_chandef(local, new_ctx,
+ &link->reserved,
+ &tmp);
+ if (WARN_ON(!chanreq))
return -EINVAL;
- ieee80211_change_chanctx(local, new_ctx, new_ctx, chandef);
+ ieee80211_change_chanctx(local, new_ctx, new_ctx, chanreq);
list_del(&link->reserved_chanctx_list);
link->reserved_chanctx = NULL;
@@ -1412,24 +1322,6 @@ ieee80211_link_has_in_place_reservation(struct ieee80211_link_data *link)
return true;
}
-static int ieee80211_chsw_switch_hwconf(struct ieee80211_local *local,
- struct ieee80211_chanctx *new_ctx)
-{
- const struct cfg80211_chan_def *chandef;
-
- lockdep_assert_wiphy(local->hw.wiphy);
-
- chandef = ieee80211_chanctx_reserved_chandef(local, new_ctx, NULL);
- if (WARN_ON(!chandef))
- return -EINVAL;
-
- local->hw.conf.radar_enabled = new_ctx->conf.radar_enabled;
- local->_oper_chandef = *chandef;
- ieee80211_hw_config(local, 0);
-
- return 0;
-}
-
static int ieee80211_chsw_switch_vifs(struct ieee80211_local *local,
int n_vifs)
{
@@ -1518,7 +1410,6 @@ err:
static int ieee80211_vif_use_reserved_switch(struct ieee80211_local *local)
{
struct ieee80211_chanctx *ctx, *ctx_tmp, *old_ctx;
- struct ieee80211_chanctx *new_ctx = NULL;
int err, n_assigned, n_reserved, n_ready;
int n_ctx = 0, n_vifs_switch = 0, n_vifs_assign = 0, n_vifs_ctxless = 0;
@@ -1551,9 +1442,6 @@ static int ieee80211_vif_use_reserved_switch(struct ieee80211_local *local)
goto err;
}
- if (!local->use_chanctx)
- new_ctx = ctx;
-
n_ctx++;
n_assigned = 0;
@@ -1607,9 +1495,7 @@ static int ieee80211_vif_use_reserved_switch(struct ieee80211_local *local)
if (WARN_ON(n_ctx == 0) ||
WARN_ON(n_vifs_switch == 0 &&
n_vifs_assign == 0 &&
- n_vifs_ctxless == 0) ||
- WARN_ON(n_ctx > 1 && !local->use_chanctx) ||
- WARN_ON(!new_ctx && !local->use_chanctx)) {
+ n_vifs_ctxless == 0)) {
err = -EINVAL;
goto err;
}
@@ -1619,20 +1505,14 @@ static int ieee80211_vif_use_reserved_switch(struct ieee80211_local *local)
* reservations and driver capabilities.
*/
- if (local->use_chanctx) {
- if (n_vifs_switch > 0) {
- err = ieee80211_chsw_switch_vifs(local, n_vifs_switch);
- if (err)
- goto err;
- }
+ if (n_vifs_switch > 0) {
+ err = ieee80211_chsw_switch_vifs(local, n_vifs_switch);
+ if (err)
+ goto err;
+ }
- if (n_vifs_assign > 0 || n_vifs_ctxless > 0) {
- err = ieee80211_chsw_switch_ctxs(local);
- if (err)
- goto err;
- }
- } else {
- err = ieee80211_chsw_switch_hwconf(local, new_ctx);
+ if (n_vifs_assign > 0 || n_vifs_ctxless > 0) {
+ err = ieee80211_chsw_switch_ctxs(local);
if (err)
goto err;
}
@@ -1672,10 +1552,10 @@ static int ieee80211_vif_use_reserved_switch(struct ieee80211_local *local)
link->radar_required = link->reserved_radar_required;
- if (link_conf->chandef.width != link->reserved_chandef.width)
+ if (link_conf->chanreq.oper.width != link->reserved.oper.width)
changed = BSS_CHANGED_BANDWIDTH;
- ieee80211_link_update_chandef(link, &link->reserved_chandef);
+ ieee80211_link_update_chanreq(link, &link->reserved);
if (changed)
ieee80211_link_info_change_notify(sdata,
link,
@@ -1810,7 +1690,7 @@ static void __ieee80211_link_release_channel(struct ieee80211_link_data *link)
}
int ieee80211_link_use_channel(struct ieee80211_link_data *link,
- const struct cfg80211_chan_def *chandef,
+ const struct ieee80211_chan_req *chanreq,
enum ieee80211_chanctx_mode mode)
{
struct ieee80211_sub_if_data *sdata = link->sdata;
@@ -1821,38 +1701,37 @@ int ieee80211_link_use_channel(struct ieee80211_link_data *link,
lockdep_assert_wiphy(local->hw.wiphy);
- if (sdata->vif.active_links &&
- !(sdata->vif.active_links & BIT(link->link_id))) {
- ieee80211_link_update_chandef(link, chandef);
+ if (!ieee80211_vif_link_active(&sdata->vif, link->link_id)) {
+ ieee80211_link_update_chanreq(link, chanreq);
return 0;
}
ret = cfg80211_chandef_dfs_required(local->hw.wiphy,
- chandef,
+ &chanreq->oper,
sdata->wdev.iftype);
if (ret < 0)
goto out;
if (ret > 0)
- radar_detect_width = BIT(chandef->width);
+ radar_detect_width = BIT(chanreq->oper.width);
link->radar_required = ret;
- ret = ieee80211_check_combinations(sdata, chandef, mode,
+ ret = ieee80211_check_combinations(sdata, &chanreq->oper, mode,
radar_detect_width);
if (ret < 0)
goto out;
__ieee80211_link_release_channel(link);
- ctx = ieee80211_find_chanctx(local, chandef, mode);
+ ctx = ieee80211_find_chanctx(local, chanreq, mode);
if (!ctx)
- ctx = ieee80211_new_chanctx(local, chandef, mode);
+ ctx = ieee80211_new_chanctx(local, chanreq, mode);
if (IS_ERR(ctx)) {
ret = PTR_ERR(ctx);
goto out;
}
- ieee80211_link_update_chandef(link, chandef);
+ ieee80211_link_update_chanreq(link, chanreq);
ret = ieee80211_assign_link_chanctx(link, ctx);
if (ret) {
@@ -1932,28 +1811,79 @@ int ieee80211_link_use_reserved_context(struct ieee80211_link_data *link)
return 0;
}
-int ieee80211_link_change_bandwidth(struct ieee80211_link_data *link,
- const struct cfg80211_chan_def *chandef,
- u64 *changed)
+/*
+ * This is similar to ieee80211_chanctx_compatible(), but rechecks
+ * against all the links actually using it (except the one that's
+ * passed, since that one is changing).
+ * This is done in order to allow changes to the AP's bandwidth for
+ * wider bandwidth OFDMA purposes, which wouldn't be treated as
+ * compatible by ieee80211_chanctx_compatible() but is OK if the link
+ * requesting the update is the only one using it.
+ */
+static const struct ieee80211_chan_req *
+ieee80211_chanctx_recheck(struct ieee80211_local *local,
+ struct ieee80211_link_data *skip_link,
+ struct ieee80211_chanctx *ctx,
+ const struct ieee80211_chan_req *req,
+ struct ieee80211_chan_req *tmp)
+{
+ const struct ieee80211_chan_req *ret = req;
+ struct ieee80211_link_data *link;
+
+ lockdep_assert_wiphy(local->hw.wiphy);
+
+ for_each_sdata_link(local, link) {
+ if (link == skip_link)
+ continue;
+
+ if (rcu_access_pointer(link->conf->chanctx_conf) == &ctx->conf) {
+ ret = ieee80211_chanreq_compatible(ret,
+ &link->conf->chanreq,
+ tmp);
+ if (!ret)
+ return NULL;
+ }
+
+ if (link->reserved_chanctx == ctx) {
+ ret = ieee80211_chanreq_compatible(ret,
+ &link->reserved,
+ tmp);
+ if (!ret)
+ return NULL;
+ }
+ }
+
+ *tmp = *ret;
+ return tmp;
+}
+
+int ieee80211_link_change_chanreq(struct ieee80211_link_data *link,
+ const struct ieee80211_chan_req *chanreq,
+ u64 *changed)
{
struct ieee80211_sub_if_data *sdata = link->sdata;
struct ieee80211_bss_conf *link_conf = link->conf;
struct ieee80211_local *local = sdata->local;
struct ieee80211_chanctx_conf *conf;
struct ieee80211_chanctx *ctx;
- const struct cfg80211_chan_def *compat;
+ const struct ieee80211_chan_req *compat;
+ struct ieee80211_chan_req tmp;
lockdep_assert_wiphy(local->hw.wiphy);
- if (!cfg80211_chandef_usable(sdata->local->hw.wiphy, chandef,
+ if (!cfg80211_chandef_usable(sdata->local->hw.wiphy,
+ &chanreq->oper,
IEEE80211_CHAN_DISABLED))
return -EINVAL;
- if (cfg80211_chandef_identical(chandef, &link_conf->chandef))
+ /* for non-HT 20 MHz the rest doesn't matter */
+ if (chanreq->oper.width == NL80211_CHAN_WIDTH_20_NOHT &&
+ cfg80211_chandef_identical(&chanreq->oper, &link_conf->chanreq.oper))
return 0;
- if (chandef->width == NL80211_CHAN_WIDTH_20_NOHT ||
- link_conf->chandef.width == NL80211_CHAN_WIDTH_20_NOHT)
+ /* but you cannot switch to/from it */
+ if (chanreq->oper.width == NL80211_CHAN_WIDTH_20_NOHT ||
+ link_conf->chanreq.oper.width == NL80211_CHAN_WIDTH_20_NOHT)
return -EINVAL;
conf = rcu_dereference_protected(link_conf->chanctx_conf,
@@ -1963,13 +1893,14 @@ int ieee80211_link_change_bandwidth(struct ieee80211_link_data *link,
ctx = container_of(conf, struct ieee80211_chanctx, conf);
- compat = cfg80211_chandef_compatible(&conf->def, chandef);
+ compat = ieee80211_chanctx_recheck(local, link, ctx, chanreq, &tmp);
if (!compat)
return -EINVAL;
switch (ctx->replace_state) {
case IEEE80211_CHANCTX_REPLACE_NONE:
- if (!ieee80211_chanctx_reserved_chandef(local, ctx, compat))
+ if (!ieee80211_chanctx_reserved_chanreq(local, ctx, compat,
+ &tmp))
return -EBUSY;
break;
case IEEE80211_CHANCTX_WILL_BE_REPLACED:
@@ -1984,7 +1915,7 @@ int ieee80211_link_change_bandwidth(struct ieee80211_link_data *link,
break;
}
- ieee80211_link_update_chandef(link, chandef);
+ ieee80211_link_update_chanreq(link, chanreq);
ieee80211_recalc_chanctx_chantype(local, ctx);
@@ -2019,12 +1950,11 @@ void ieee80211_link_vlan_copy_chanctx(struct ieee80211_link_data *link)
ap = container_of(sdata->bss, struct ieee80211_sub_if_data, u.ap);
- rcu_read_lock();
- ap_conf = rcu_dereference(ap->vif.link_conf[link_id]);
- conf = rcu_dereference_protected(ap_conf->chanctx_conf,
- lockdep_is_held(&local->hw.wiphy->mtx));
+ ap_conf = wiphy_dereference(local->hw.wiphy,
+ ap->vif.link_conf[link_id]);
+ conf = wiphy_dereference(local->hw.wiphy,
+ ap_conf->chanctx_conf);
rcu_assign_pointer(link_conf->chanctx_conf, conf);
- rcu_read_unlock();
}
void ieee80211_iter_chan_contexts_atomic(
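The compatibility rule chan.c now applies everywhere: two requests are compatible iff their .oper chandefs are compatible in the cfg80211 sense and their .ap chandefs, where both are set, are identical; the merged result takes the common operating chandef and whichever .ap is set. A worked sketch with hypothetical chandefs:

	/* illustrative values; chandef_80 covers chandef_40 */
	struct ieee80211_chan_req a = { .oper = chandef_80 };
	struct ieee80211_chan_req b = { .oper = chandef_40 };
	struct ieee80211_chan_req tmp;
	const struct ieee80211_chan_req *compat;

	compat = ieee80211_chanreq_compatible(&a, &b, &tmp);
	/*
	 * compat == &tmp: tmp.oper is whatever
	 * cfg80211_chandef_compatible() returned for the two operating
	 * chandefs, and tmp.ap was taken from a (b.ap.chan is NULL).
	 * Had both a.ap and b.ap been set but different, compat would
	 * be NULL.
	 */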
diff --git a/net/mac80211/debug.h b/net/mac80211/debug.h
index d49894df2351..49da401c5340 100644
--- a/net/mac80211/debug.h
+++ b/net/mac80211/debug.h
@@ -152,16 +152,17 @@ do { \
else \
_sdata_err((link)->sdata, fmt, ##__VA_ARGS__); \
} while (0)
-#define link_dbg(link, fmt, ...) \
+#define _link_id_dbg(print, sdata, link_id, fmt, ...) \
do { \
- if (ieee80211_vif_is_mld(&(link)->sdata->vif)) \
- _sdata_dbg(1, (link)->sdata, "[link %d] " fmt, \
- (link)->link_id, \
- ##__VA_ARGS__); \
+ if (ieee80211_vif_is_mld(&(sdata)->vif)) \
+ _sdata_dbg(print, sdata, "[link %d] " fmt, \
+ link_id, ##__VA_ARGS__); \
else \
- _sdata_dbg(1, (link)->sdata, fmt, \
- ##__VA_ARGS__); \
+ _sdata_dbg(print, sdata, fmt, ##__VA_ARGS__); \
} while (0)
+#define link_dbg(link, fmt, ...) \
+ _link_id_dbg(1, (link)->sdata, (link)->link_id, \
+ fmt, ##__VA_ARGS__)
#define ht_dbg(sdata, fmt, ...) \
_sdata_dbg(MAC80211_HT_DEBUG, \
@@ -226,6 +227,9 @@ do { \
#define mlme_dbg(sdata, fmt, ...) \
_sdata_dbg(MAC80211_MLME_DEBUG, \
sdata, fmt, ##__VA_ARGS__)
+#define mlme_link_id_dbg(sdata, link_id, fmt, ...) \
+ _link_id_dbg(MAC80211_MLME_DEBUG, sdata, link_id, \
+ fmt, ##__VA_ARGS__)
#define mlme_dbg_ratelimited(sdata, fmt, ...) \
_sdata_dbg(MAC80211_MLME_DEBUG && net_ratelimit(), \
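The link_dbg() body is factored into _link_id_dbg() so that MLME code which has only an sdata and a link ID, not a link pointer, can share the "[link N]" prefixing. A usage sketch of the new macro (format arguments are illustrative):

	/* prints "[link 1] ..." on MLD vifs, plain otherwise, and only
	 * when MAC80211_MLME_DEBUG is enabled */
	mlme_link_id_dbg(sdata, 1, "switching to freq %d\n", freq);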
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index 74be49191e70..2f68e92a7404 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -4,7 +4,7 @@
*
* Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
- * Copyright (C) 2018 - 2019, 2021-2023 Intel Corporation
+ * Copyright (C) 2018 - 2019, 2021-2024 Intel Corporation
*/
#include <linux/debugfs.h>
@@ -498,6 +498,7 @@ static const char *hw_flag_names[] = {
FLAG(DETECTS_COLOR_COLLISION),
FLAG(MLO_MCAST_MULTI_LINK_TX),
FLAG(DISALLOW_PUNCTURING),
+ FLAG(HANDLES_QUIET_CSA),
#undef FLAG
};
diff --git a/net/mac80211/driver-ops.c b/net/mac80211/driver-ops.c
index 3b7f70073fc3..dce37ba8ebe3 100644
--- a/net/mac80211/driver-ops.c
+++ b/net/mac80211/driver-ops.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2015 Intel Deutschland GmbH
- * Copyright (C) 2022-2023 Intel Corporation
+ * Copyright (C) 2022-2024 Intel Corporation
*/
#include <net/mac80211.h>
#include "ieee80211_i.h"
@@ -214,8 +214,7 @@ int drv_conf_tx(struct ieee80211_local *local,
if (!check_sdata_in_driver(sdata))
return -EIO;
- if (sdata->vif.active_links &&
- !(sdata->vif.active_links & BIT(link->link_id)))
+ if (!ieee80211_vif_link_active(&sdata->vif, link->link_id))
return 0;
if (params->cw_min == 0 || params->cw_min > params->cw_max) {
@@ -315,8 +314,7 @@ int drv_assign_vif_chanctx(struct ieee80211_local *local,
if (!check_sdata_in_driver(sdata))
return -EIO;
- if (sdata->vif.active_links &&
- !(sdata->vif.active_links & BIT(link_conf->link_id)))
+ if (!ieee80211_vif_link_active(&sdata->vif, link_conf->link_id))
return 0;
trace_drv_assign_vif_chanctx(local, sdata, link_conf, ctx);
@@ -343,8 +341,7 @@ void drv_unassign_vif_chanctx(struct ieee80211_local *local,
if (!check_sdata_in_driver(sdata))
return;
- if (sdata->vif.active_links &&
- !(sdata->vif.active_links & BIT(link_conf->link_id)))
+ if (!ieee80211_vif_link_active(&sdata->vif, link_conf->link_id))
return;
trace_drv_unassign_vif_chanctx(local, sdata, link_conf, ctx);
@@ -461,8 +458,7 @@ void drv_link_info_changed(struct ieee80211_local *local,
if (!check_sdata_in_driver(sdata))
return;
- if (sdata->vif.active_links &&
- !(sdata->vif.active_links & BIT(link_id)))
+ if (!ieee80211_vif_link_active(&sdata->vif, link_id))
return;
trace_drv_link_info_changed(local, sdata, info, changed);
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index eb482fb8c3af..5d078c0a2323 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -2,7 +2,7 @@
/*
* Portions of this file
* Copyright(c) 2016 Intel Deutschland GmbH
-* Copyright (C) 2018 - 2019, 2021 - 2023 Intel Corporation
+* Copyright (C) 2018-2019, 2021-2024 Intel Corporation
*/
#ifndef __MAC80211_DRIVER_OPS
@@ -1180,8 +1180,9 @@ drv_post_channel_switch(struct ieee80211_link_data *link)
}
static inline void
-drv_abort_channel_switch(struct ieee80211_sub_if_data *sdata)
+drv_abort_channel_switch(struct ieee80211_link_data *link)
{
+ struct ieee80211_sub_if_data *sdata = link->sdata;
struct ieee80211_local *local = sdata->local;
might_sleep();
@@ -1193,7 +1194,8 @@ drv_abort_channel_switch(struct ieee80211_sub_if_data *sdata)
trace_drv_abort_channel_switch(local, sdata);
if (local->ops->abort_channel_switch)
- local->ops->abort_channel_switch(&local->hw, &sdata->vif);
+ local->ops->abort_channel_switch(&local->hw, &sdata->vif,
+ link->conf);
}
static inline void
@@ -1695,4 +1697,23 @@ int drv_change_sta_links(struct ieee80211_local *local,
struct ieee80211_sta *sta,
u16 old_links, u16 new_links);
+static inline enum ieee80211_neg_ttlm_res
+drv_can_neg_ttlm(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_neg_ttlm *neg_ttlm)
+{
+ enum ieee80211_neg_ttlm_res res = NEG_TTLM_RES_REJECT;
+
+ might_sleep();
+ if (!check_sdata_in_driver(sdata))
+ return -EIO;
+
+ trace_drv_can_neg_ttlm(local, sdata, neg_ttlm);
+ if (local->ops->can_neg_ttlm)
+ res = local->ops->can_neg_ttlm(&local->hw, &sdata->vif,
+ neg_ttlm);
+ trace_drv_neg_ttlm_res(local, sdata, res, neg_ttlm);
+
+ return res;
+}
#endif /* __MAC80211_DRIVER_OPS */
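Drivers opt into negotiated TID-to-link mapping by providing ops->can_neg_ttlm; without the callback, drv_can_neg_ttlm() falls through to the reject default. A hedged driver-side sketch, assuming NEG_TTLM_RES_ACCEPT is the accepting counterpart of the NEG_TTLM_RES_REJECT default above:

	static enum ieee80211_neg_ttlm_res
	example_can_neg_ttlm(struct ieee80211_hw *hw,
			     struct ieee80211_vif *vif,
			     struct ieee80211_neg_ttlm *neg_ttlm)
	{
		/* accept whatever mapping the peer proposes */
		return NEG_TTLM_RES_ACCEPT;
	}

	/* ... in the driver's struct ieee80211_ops: */
	.can_neg_ttlm = example_can_neg_ttlm,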
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index 749f4ecab990..c3330aea4da3 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -9,7 +9,7 @@
* Copyright 2007, Michael Wu <flamingice@sourmilk.net>
* Copyright 2007-2010, Intel Corporation
* Copyright 2017 Intel Deutschland GmbH
- * Copyright(c) 2020-2023 Intel Corporation
+ * Copyright(c) 2020-2024 Intel Corporation
*/
#include <linux/ieee80211.h>
@@ -257,7 +257,7 @@ bool ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata,
if (WARN_ON(!link_conf))
width = NL80211_CHAN_WIDTH_20_NOHT;
else
- width = link_conf->chandef.width;
+ width = link_conf->chanreq.oper.width;
switch (width) {
default:
@@ -603,6 +603,8 @@ void ieee80211_request_smps(struct ieee80211_vif *vif, unsigned int link_id,
if (WARN_ON(!link))
goto out;
+ trace_api_request_smps(sdata->local, sdata, link, smps_mode);
+
if (link->u.mgd.driver_smps_mode == smps_mode)
goto out;
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 8f2b445a5ec3..7ace5cdc6c26 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -223,7 +223,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
struct ieee80211_mgmt *mgmt;
struct cfg80211_bss *bss;
u64 bss_change;
- struct cfg80211_chan_def chandef;
+ struct ieee80211_chan_req chanreq = {};
struct ieee80211_channel *chan;
struct beacon_data *presp;
struct cfg80211_inform_bss bss_meta = {};
@@ -237,7 +237,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
drv_reset_tsf(local, sdata);
if (!ether_addr_equal(ifibss->bssid, bssid))
- sta_info_flush(sdata);
+ sta_info_flush(sdata, -1);
/* if merging, indicate to driver that we leave the old IBSS */
if (sdata->vif.cfg.ibss_joined) {
@@ -257,22 +257,22 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
kfree_rcu(presp, rcu_head);
/* make a copy of the chandef, it could be modified below. */
- chandef = *req_chandef;
- chan = chandef.chan;
- if (!cfg80211_reg_can_beacon(local->hw.wiphy, &chandef,
+ chanreq.oper = *req_chandef;
+ chan = chanreq.oper.chan;
+ if (!cfg80211_reg_can_beacon(local->hw.wiphy, &chanreq.oper,
NL80211_IFTYPE_ADHOC)) {
- if (chandef.width == NL80211_CHAN_WIDTH_5 ||
- chandef.width == NL80211_CHAN_WIDTH_10 ||
- chandef.width == NL80211_CHAN_WIDTH_20_NOHT ||
- chandef.width == NL80211_CHAN_WIDTH_20) {
+ if (chanreq.oper.width == NL80211_CHAN_WIDTH_5 ||
+ chanreq.oper.width == NL80211_CHAN_WIDTH_10 ||
+ chanreq.oper.width == NL80211_CHAN_WIDTH_20_NOHT ||
+ chanreq.oper.width == NL80211_CHAN_WIDTH_20) {
sdata_info(sdata,
"Failed to join IBSS, beacons forbidden\n");
return;
}
- chandef.width = NL80211_CHAN_WIDTH_20;
- chandef.center_freq1 = chan->center_freq;
+ chanreq.oper.width = NL80211_CHAN_WIDTH_20;
+ chanreq.oper.center_freq1 = chan->center_freq;
/* check again for downgraded chandef */
- if (!cfg80211_reg_can_beacon(local->hw.wiphy, &chandef,
+ if (!cfg80211_reg_can_beacon(local->hw.wiphy, &chanreq.oper,
NL80211_IFTYPE_ADHOC)) {
sdata_info(sdata,
"Failed to join IBSS, beacons forbidden\n");
@@ -281,7 +281,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
}
err = cfg80211_chandef_dfs_required(sdata->local->hw.wiphy,
- &chandef, NL80211_IFTYPE_ADHOC);
+ &chanreq.oper, NL80211_IFTYPE_ADHOC);
if (err < 0) {
sdata_info(sdata,
"Failed to join IBSS, invalid chandef\n");
@@ -295,7 +295,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
radar_required = err;
- if (ieee80211_link_use_channel(&sdata->deflink, &chandef,
+ if (ieee80211_link_use_channel(&sdata->deflink, &chanreq,
ifibss->fixed_channel ?
IEEE80211_CHANCTX_SHARED :
IEEE80211_CHANCTX_EXCLUSIVE)) {
@@ -307,7 +307,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
memcpy(ifibss->bssid, bssid, ETH_ALEN);
presp = ieee80211_ibss_build_presp(sdata, beacon_int, basic_rates,
- capability, tsf, &chandef,
+ capability, tsf, &chanreq.oper,
&have_higher_than_11mbit, NULL);
if (!presp)
return;
@@ -533,12 +533,12 @@ int ieee80211_ibss_finish_csa(struct ieee80211_sub_if_data *sdata, u64 *changed)
IEEE80211_PRIVACY(ifibss->privacy));
/* XXX: should not really modify cfg80211 data */
if (cbss) {
- cbss->channel = sdata->deflink.csa_chandef.chan;
+ cbss->channel = sdata->deflink.csa_chanreq.oper.chan;
cfg80211_put_bss(sdata->local->hw.wiphy, cbss);
}
}
- ifibss->chandef = sdata->deflink.csa_chandef;
+ ifibss->chandef = sdata->deflink.csa_chanreq.oper;
/* generate the beacon */
return ieee80211_ibss_csa_beacon(sdata, NULL, changed);
@@ -682,7 +682,7 @@ static void ieee80211_ibss_disconnect(struct ieee80211_sub_if_data *sdata)
ifibss->state = IEEE80211_IBSS_MLME_SEARCH;
- sta_info_flush(sdata);
+ sta_info_flush(sdata, -1);
spin_lock_bh(&ifibss->incomplete_lock);
while (!list_empty(&ifibss->incomplete_stations)) {
@@ -757,21 +757,22 @@ ieee80211_ibss_process_chanswitch(struct ieee80211_sub_if_data *sdata,
struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
enum nl80211_channel_type ch_type;
int err;
- ieee80211_conn_flags_t conn_flags;
+ struct ieee80211_conn_settings conn = {
+ .mode = IEEE80211_CONN_MODE_HT,
+ .bw_limit = IEEE80211_CONN_BW_LIMIT_40,
+ };
u32 vht_cap_info = 0;
lockdep_assert_wiphy(sdata->local->hw.wiphy);
- conn_flags = IEEE80211_CONN_DISABLE_VHT;
-
switch (ifibss->chandef.width) {
case NL80211_CHAN_WIDTH_5:
case NL80211_CHAN_WIDTH_10:
case NL80211_CHAN_WIDTH_20_NOHT:
- conn_flags |= IEEE80211_CONN_DISABLE_HT;
+ conn.mode = IEEE80211_CONN_MODE_LEGACY;
fallthrough;
case NL80211_CHAN_WIDTH_20:
- conn_flags |= IEEE80211_CONN_DISABLE_40MHZ;
+ conn.bw_limit = IEEE80211_CONN_BW_LIMIT_20;
break;
default:
break;
@@ -783,8 +784,8 @@ ieee80211_ibss_process_chanswitch(struct ieee80211_sub_if_data *sdata,
memset(&params, 0, sizeof(params));
err = ieee80211_parse_ch_switch_ie(sdata, elems,
ifibss->chandef.chan->band,
- vht_cap_info,
- conn_flags, ifibss->bssid, &csa_ie);
+ vht_cap_info, &conn,
+ ifibss->bssid, &csa_ie);
/* can't switch to destination channel, fail */
if (err < 0)
goto disconnect;
@@ -798,7 +799,7 @@ ieee80211_ibss_process_chanswitch(struct ieee80211_sub_if_data *sdata,
goto disconnect;
params.count = csa_ie.count;
- params.chandef = csa_ie.chandef;
+ params.chandef = csa_ie.chanreq.oper;
switch (ifibss->chandef.width) {
case NL80211_CHAN_WIDTH_20_NOHT:
@@ -857,7 +858,7 @@ ieee80211_ibss_process_chanswitch(struct ieee80211_sub_if_data *sdata,
params.radar_required = err;
if (cfg80211_chandef_identical(&params.chandef,
- &sdata->vif.bss_conf.chandef)) {
+ &sdata->vif.bss_conf.chanreq.oper)) {
ibss_dbg(sdata,
"received csa with an identical chandef, ignoring\n");
return true;
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 0b2b53550bd9..b6fead612b66 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -5,7 +5,7 @@
* Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
* Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2015 Intel Mobile Communications GmbH
- * Copyright (C) 2018-2022 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
#ifndef IEEE80211_I_H
@@ -370,19 +370,32 @@ enum ieee80211_sta_flags {
IEEE80211_STA_ENABLE_RRM = BIT(15),
};
-typedef u32 __bitwise ieee80211_conn_flags_t;
-
-enum ieee80211_conn_flags {
- IEEE80211_CONN_DISABLE_HT = (__force ieee80211_conn_flags_t)BIT(0),
- IEEE80211_CONN_DISABLE_40MHZ = (__force ieee80211_conn_flags_t)BIT(1),
- IEEE80211_CONN_DISABLE_VHT = (__force ieee80211_conn_flags_t)BIT(2),
- IEEE80211_CONN_DISABLE_80P80MHZ = (__force ieee80211_conn_flags_t)BIT(3),
- IEEE80211_CONN_DISABLE_160MHZ = (__force ieee80211_conn_flags_t)BIT(4),
- IEEE80211_CONN_DISABLE_HE = (__force ieee80211_conn_flags_t)BIT(5),
- IEEE80211_CONN_DISABLE_EHT = (__force ieee80211_conn_flags_t)BIT(6),
- IEEE80211_CONN_DISABLE_320MHZ = (__force ieee80211_conn_flags_t)BIT(7),
+enum ieee80211_conn_mode {
+ IEEE80211_CONN_MODE_S1G,
+ IEEE80211_CONN_MODE_LEGACY,
+ IEEE80211_CONN_MODE_HT,
+ IEEE80211_CONN_MODE_VHT,
+ IEEE80211_CONN_MODE_HE,
+ IEEE80211_CONN_MODE_EHT,
};
+#define IEEE80211_CONN_MODE_HIGHEST IEEE80211_CONN_MODE_EHT
+
+enum ieee80211_conn_bw_limit {
+ IEEE80211_CONN_BW_LIMIT_20,
+ IEEE80211_CONN_BW_LIMIT_40,
+ IEEE80211_CONN_BW_LIMIT_80,
+ IEEE80211_CONN_BW_LIMIT_160, /* also 80+80 */
+ IEEE80211_CONN_BW_LIMIT_320,
+};
+
+struct ieee80211_conn_settings {
+ enum ieee80211_conn_mode mode;
+ enum ieee80211_conn_bw_limit bw_limit;
+};
+
+extern const struct ieee80211_conn_settings ieee80211_conn_settings_unlimited;
+
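
The replacement model is ordered rather than flag-based: each enum lists capabilities from weakest to strongest, so "is VHT usable" becomes a simple comparison instead of a negative flag test. A minimal userspace sketch of the idea, with illustrative names that only mirror (and are not) the kernel's:

#include <stdbool.h>
#include <stdio.h>

enum conn_mode { MODE_S1G, MODE_LEGACY, MODE_HT, MODE_VHT, MODE_HE, MODE_EHT };
enum conn_bw   { BW_20, BW_40, BW_80, BW_160, BW_320 };

struct conn_settings {
	enum conn_mode mode;
	enum conn_bw bw_limit;
};

/* old style: "VHT is allowed unless DISABLE_VHT is set";
 * new style: "VHT is allowed iff the negotiated mode reaches VHT" */
static bool vht_allowed(const struct conn_settings *conn)
{
	return conn->mode >= MODE_VHT;
}

static bool can_use_80mhz(const struct conn_settings *conn)
{
	return vht_allowed(conn) && conn->bw_limit >= BW_80;
}

int main(void)
{
	struct conn_settings conn = { .mode = MODE_HE, .bw_limit = BW_40 };

	printf("vht:%d 80mhz:%d\n", vht_allowed(&conn), can_use_80mhz(&conn));
	return 0;
}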
struct ieee80211_mgd_auth_data {
struct cfg80211_bss *bss;
unsigned long timeout;
@@ -416,7 +429,7 @@ struct ieee80211_mgd_assoc_data {
size_t elems_len;
u8 *elems; /* pointing to inside ie[] below */
- ieee80211_conn_flags_t conn_flags;
+ struct ieee80211_conn_settings conn;
u16 status;
@@ -441,6 +454,7 @@ struct ieee80211_mgd_assoc_data {
bool timeout_started;
bool comeback; /* whether the AP has requested association comeback */
bool s1g;
+ bool spp_amsdu;
unsigned int assoc_link_id;
@@ -509,6 +523,8 @@ struct ieee80211_if_managed {
unsigned int flags;
+ u16 mcast_seq_last;
+
bool status_acked;
bool status_received;
__le16 status_fc;
@@ -579,6 +595,10 @@ struct ieee80211_if_managed {
/* TID-to-link mapping support */
struct wiphy_delayed_work ttlm_work;
struct ieee80211_adv_ttlm_info ttlm_info;
+
+ /* dialog token enumerator for neg TTLM request */
+ u8 dialog_token_alloc;
+ struct wiphy_delayed_work neg_ttlm_timeout_work;
};
struct ieee80211_if_ibss {
@@ -866,6 +886,9 @@ struct ieee80211_chanctx {
enum ieee80211_chanctx_mode mode;
bool driver_present;
+ /* temporary data for search algorithm etc. */
+ struct ieee80211_chan_req req;
+
struct ieee80211_chanctx_conf conf;
};
@@ -938,7 +961,7 @@ struct ieee80211_link_data_managed {
enum ieee80211_smps_mode req_smps, /* requested smps mode */
driver_smps_mode; /* smps mode request */
- ieee80211_conn_flags_t conn_flags;
+ struct ieee80211_conn_settings conn;
s16 p2p_noa_index;
@@ -983,8 +1006,6 @@ struct ieee80211_link_data_managed {
int mu_edca_last_param_set;
u8 bss_param_ch_cnt;
-
- struct cfg80211_bss *bss;
};
struct ieee80211_link_data_ap {
@@ -1013,11 +1034,10 @@ struct ieee80211_link_data {
struct ieee80211_key __rcu *default_beacon_key;
struct wiphy_work csa_finalize_work;
- bool csa_block_tx;
bool operating_11g_mode;
- struct cfg80211_chan_def csa_chandef;
+ struct ieee80211_chan_req csa_chanreq;
struct wiphy_work color_change_finalize_work;
struct delayed_work color_collision_detect_work;
@@ -1025,7 +1045,7 @@ struct ieee80211_link_data {
/* context reservation -- protected with wiphy mutex */
struct ieee80211_chanctx *reserved_chanctx;
- struct cfg80211_chan_def reserved_chandef;
+ struct ieee80211_chan_req reserved;
bool reserved_radar_required;
bool reserved_ready;
@@ -1072,6 +1092,8 @@ struct ieee80211_sub_if_data {
unsigned long state;
+ bool csa_blocked_tx;
+
char name[IFNAMSIZ];
struct ieee80211_fragment_cache frags;
@@ -1160,6 +1182,19 @@ struct ieee80211_sub_if_data *vif_to_sdata(struct ieee80211_vif *p)
#define sdata_dereference(p, sdata) \
wiphy_dereference(sdata->local->hw.wiphy, p)
+#define for_each_sdata_link(_local, _link) \
+ /* outer loop just to define the variables ... */ \
+ for (struct ieee80211_sub_if_data *___sdata = NULL; \
+ !___sdata; \
+ ___sdata = (void *)~0 /* always stop */) \
+ list_for_each_entry(___sdata, &(_local)->interfaces, list) \
+ if (ieee80211_sdata_running(___sdata)) \
+ for (int ___link_id = 0; \
+ ___link_id < ARRAY_SIZE(___sdata->link); \
+ ___link_id++) \
+ if ((_link = wiphy_dereference((_local)->hw.wiphy, \
+ ___sdata->link[___link_id])))
+
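
The for_each_sdata_link() macro above relies on a common kernel idiom: an outer for loop that runs exactly once and exists only to declare loop-scoped variables, so the whole construct stays a single statement that can follow an if without braces. A self-contained userspace sketch of the same shape, with simplified data structures and illustrative names:

#include <stdio.h>

struct node {
	struct node *next;
	int links[3];		/* 0 means "no link", like a NULL entry */
};

#define for_each_node_link(_head, _out)					\
	/* outer loop runs once; it only exists to declare ___n */	\
	for (struct node *___n = NULL; !___n; ___n = (void *)~0)	\
		for (___n = (_head); ___n; ___n = ___n->next)		\
			for (int ___i = 0; ___i < 3; ___i++)		\
				if (((_out) = ___n->links[___i]))

int main(void)
{
	struct node b = { NULL, { 0, 5, 6 } };
	struct node a = { &b,  { 1, 0, 3 } };
	int v;

	if (1)	/* the macro expands to a single statement: no braces */
		for_each_node_link(&a, v)
			printf("%d\n", v);	/* prints 1 3 5 6 */
	return 0;
}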
static inline int
ieee80211_get_mbssid_beacon_len(struct cfg80211_mbssid_elems *elems,
struct cfg80211_rnr_elems *rnr_elems,
@@ -1330,7 +1365,8 @@ struct ieee80211_local {
bool wiphy_ciphers_allocated;
- bool use_chanctx;
+ struct cfg80211_chan_def dflt_chandef;
+ bool emulate_chanctx;
/* protects the aggregated multicast list and filter calls */
spinlock_t filter_lock;
@@ -1456,8 +1492,6 @@ struct ieee80211_local {
enum mac80211_scan_state next_scan_state;
struct wiphy_delayed_work scan_work;
struct ieee80211_sub_if_data __rcu *scan_sdata;
- /* For backward compatibility only -- do not use */
- struct cfg80211_chan_def _oper_chandef;
/* Temporary remain-on-channel for off-channel operations */
struct ieee80211_channel *tmp_channel;
@@ -1531,8 +1565,6 @@ struct ieee80211_local {
int user_power_level; /* in dBm, for all interfaces */
- enum ieee80211_smps_mode smps_mode;
-
struct work_struct restart_work;
#ifdef CONFIG_MAC80211_DEBUGFS
@@ -1559,7 +1591,7 @@ struct ieee80211_local {
/* virtual monitor interface */
struct ieee80211_sub_if_data __rcu *monitor_sdata;
- struct cfg80211_chan_def monitor_chandef;
+ struct ieee80211_chan_req monitor_chanreq;
/* extended capabilities provided by mac80211 */
u8 ext_capa[8];
@@ -1624,7 +1656,7 @@ ieee80211_get_link_sband(struct ieee80211_link_data *link)
/* this struct holds the value parsing from channel switch IE */
struct ieee80211_csa_ie {
- struct cfg80211_chan_def chandef;
+ struct ieee80211_chan_req chanreq;
u8 mode;
u8 count;
u8 ttl;
@@ -1633,6 +1665,14 @@ struct ieee80211_csa_ie {
u32 max_switch_time;
};
+enum ieee80211_elems_parse_error {
+ IEEE80211_PARSE_ERR_INVALID_END = BIT(0),
+ IEEE80211_PARSE_ERR_DUP_ELEM = BIT(1),
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE = BIT(2),
+ IEEE80211_PARSE_ERR_UNEXPECTED_ELEM = BIT(3),
+ IEEE80211_PARSE_ERR_DUP_NEST_ML_BASIC = BIT(4),
+};
+
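
With this enum, parse_error changes from a bool to a u8 bitmask built from these bits (see the struct change further down), so existing truthiness checks keep working while debug code can now tell error classes apart. A small userspace sketch of the accumulation pattern; the bit names are stand-ins for IEEE80211_PARSE_ERR_*:

#include <stdio.h>

#define PARSE_ERR_INVALID_END	(1U << 0)
#define PARSE_ERR_DUP_ELEM	(1U << 1)
#define PARSE_ERR_BAD_SIZE	(1U << 2)

struct elems { unsigned char parse_error; };

static void parse(struct elems *e, int left_over, int duplicate)
{
	if (left_over)
		e->parse_error |= PARSE_ERR_INVALID_END;
	if (duplicate)
		e->parse_error |= PARSE_ERR_DUP_ELEM;
}

int main(void)
{
	struct elems e = { 0 };

	parse(&e, 1, 1);
	if (e.parse_error)	/* the old bool-style check still works */
		printf("errors: 0x%02x\n", e.parse_error);
	return 0;
}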
/* Parsed Information Elements */
struct ieee802_11_elems {
const u8 *ie_start;
@@ -1727,12 +1767,6 @@ struct ieee802_11_elems {
size_t ml_basic_len;
size_t ml_reconf_len;
- /* The basic Multi-Link element in the original IEs */
- const struct element *ml_basic_elem;
-
- /* The reconfiguration Multi-Link element in the original IEs */
- const struct element *ml_reconf_elem;
-
u8 ttlm_num;
/*
@@ -1743,16 +1777,8 @@ struct ieee802_11_elems {
struct ieee80211_mle_per_sta_profile *prof;
size_t sta_prof_len;
- /* whether a parse error occurred while retrieving these elements */
- bool parse_error;
-
- /*
- * scratch buffer that can be used for various element parsing related
- * tasks, e.g., element de-fragmentation etc.
- */
- size_t scratch_len;
- u8 *scratch_pos;
- u8 scratch[] __counted_by(scratch_len);
+ /* whether/which parse error occurred while retrieving these elements */
+ u8 parse_error;
};
static inline struct ieee80211_local *hw_to_local(
@@ -1801,6 +1827,8 @@ u64 ieee80211_calculate_rx_timestamp(struct ieee80211_local *local,
unsigned int mpdu_len,
unsigned int mpdu_offset);
int ieee80211_hw_config(struct ieee80211_local *local, u32 changed);
+int ieee80211_hw_conf_chan(struct ieee80211_local *local);
+void ieee80211_hw_conf_init(struct ieee80211_local *local);
void ieee80211_tx_set_protected(struct ieee80211_tx_data *tx);
void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
u64 changed);
@@ -2112,7 +2140,7 @@ enum ieee80211_sta_rx_bandwidth
ieee80211_sta_cap_rx_bw(struct link_sta_info *link_sta);
enum ieee80211_sta_rx_bandwidth
ieee80211_sta_cur_vht_bw(struct link_sta_info *link_sta);
-void ieee80211_sta_set_rx_nss(struct link_sta_info *link_sta);
+void ieee80211_sta_init_nss(struct link_sta_info *link_sta);
enum ieee80211_sta_rx_bandwidth
ieee80211_chan_width_to_rx_bw(enum nl80211_chan_width width);
enum nl80211_chan_width
@@ -2166,9 +2194,8 @@ void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata,
* @elems: parsed 802.11 elements received with the frame
* @current_band: indicates the current band
* @vht_cap_info: VHT capabilities of the transmitter
- * @conn_flags: contains information about own capabilities and restrictions
- * to decide which channel switch announcements can be accepted, using
- * flags from &enum ieee80211_conn_flags.
+ * @conn: contains information about own capabilities and restrictions
+ * to decide which channel switch announcements can be accepted
* @bssid: the currently connected bssid (for reporting)
* @csa_ie: parsed 802.11 CSA elements: count, mode, chandef and mesh TTL.
* All of them are filled in only on success.
@@ -2178,7 +2205,8 @@ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
struct ieee802_11_elems *elems,
enum nl80211_band current_band,
u32 vht_cap_info,
- ieee80211_conn_flags_t conn_flags, u8 *bssid,
+ struct ieee80211_conn_settings *conn,
+ u8 *bssid,
struct ieee80211_csa_ie *csa_ie);
/* Suspend/resume and hw reconfiguration */
@@ -2202,6 +2230,9 @@ static inline int __ieee80211_resume(struct ieee80211_hw *hw)
/* utility functions/constants */
extern const void *const mac80211_wiphy_privid; /* for wiphy privid */
+const char *ieee80211_conn_mode_str(enum ieee80211_conn_mode mode);
+enum ieee80211_conn_bw_limit
+ieee80211_min_bw_limit_from_chandef(struct cfg80211_chan_def *chandef);
int ieee80211_frame_duration(enum nl80211_band band, size_t len,
int rate, int erp, int short_preamble);
void ieee80211_regulatory_limit_wmm_params(struct ieee80211_sub_if_data *sdata,
@@ -2243,6 +2274,7 @@ static inline void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata,
/**
* struct ieee80211_elems_parse_params - element parsing parameters
+ * @mode: connection mode for parsing
* @start: pointer to the elements
* @len: length of the elements
* @action: %true if the elements came from an action frame
@@ -2260,6 +2292,7 @@ static inline void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata,
* for EHT capabilities parsing)
*/
struct ieee80211_elems_parse_params {
+ enum ieee80211_conn_mode mode;
const u8 *start;
size_t len;
bool action;
@@ -2279,6 +2312,7 @@ ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
struct cfg80211_bss *bss)
{
struct ieee80211_elems_parse_params params = {
+ .mode = IEEE80211_CONN_MODE_HIGHEST,
.start = start,
.len = len,
.action = action,
@@ -2408,7 +2442,6 @@ void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
const u8 *da, const u8 *bssid,
u16 stype, u16 reason,
bool send_frame, u8 *frame_buf);
-u8 *ieee80211_write_he_6ghz_cap(u8 *pos, __le16 cap, u8 *end);
enum {
IEEE80211_PROBE_FLAG_DIRECTED = BIT(0),
@@ -2453,32 +2486,36 @@ u8 *ieee80211_ie_build_vht_cap(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap,
u32 cap);
u8 *ieee80211_ie_build_vht_oper(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap,
const struct cfg80211_chan_def *chandef);
-u8 ieee80211_ie_len_he_cap(struct ieee80211_sub_if_data *sdata, u8 iftype);
-u8 *ieee80211_ie_build_he_cap(ieee80211_conn_flags_t disable_flags, u8 *pos,
- const struct ieee80211_sta_he_cap *he_cap,
- u8 *end);
-void ieee80211_ie_build_he_6ghz_cap(struct ieee80211_sub_if_data *sdata,
- enum ieee80211_smps_mode smps_mode,
- struct sk_buff *skb);
+u8 ieee80211_ie_len_he_cap(struct ieee80211_sub_if_data *sdata);
u8 *ieee80211_ie_build_he_oper(u8 *pos, struct cfg80211_chan_def *chandef);
u8 *ieee80211_ie_build_eht_oper(u8 *pos, struct cfg80211_chan_def *chandef,
const struct ieee80211_sta_eht_cap *eht_cap);
int ieee80211_parse_bitrates(enum nl80211_chan_width width,
const struct ieee80211_supported_band *sband,
const u8 *srates, int srates_len, u32 *rates);
-int ieee80211_add_srates_ie(struct ieee80211_sub_if_data *sdata,
- struct sk_buff *skb, bool need_basic,
- enum nl80211_band band);
-int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata,
- struct sk_buff *skb, bool need_basic,
- enum nl80211_band band);
u8 *ieee80211_add_wmm_info_ie(u8 *buf, u8 qosinfo);
void ieee80211_add_s1g_capab_ie(struct ieee80211_sub_if_data *sdata,
struct ieee80211_sta_s1g_cap *caps,
struct sk_buff *skb);
void ieee80211_add_aid_request_ie(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb);
-u8 *ieee80211_ie_build_s1g_cap(u8 *pos, struct ieee80211_sta_s1g_cap *s1g_cap);
+
+/* element building in SKBs */
+int ieee80211_put_srates_elem(struct sk_buff *skb,
+ const struct ieee80211_supported_band *sband,
+ u32 basic_rates, u32 rate_flags, u32 masked_rates,
+ u8 element_id);
+int ieee80211_put_he_cap(struct sk_buff *skb,
+ struct ieee80211_sub_if_data *sdata,
+ const struct ieee80211_supported_band *sband,
+ const struct ieee80211_conn_settings *conn);
+int ieee80211_put_he_6ghz_cap(struct sk_buff *skb,
+ struct ieee80211_sub_if_data *sdata,
+ enum ieee80211_smps_mode smps_mode);
+int ieee80211_put_eht_cap(struct sk_buff *skb,
+ struct ieee80211_sub_if_data *sdata,
+ const struct ieee80211_supported_band *sband,
+ const struct ieee80211_conn_settings *conn);
/* channel management */
bool ieee80211_chandef_ht_oper(const struct ieee80211_ht_operation *ht_oper,
@@ -2488,23 +2525,36 @@ bool ieee80211_chandef_vht_oper(struct ieee80211_hw *hw, u32 vht_cap_info,
const struct ieee80211_ht_operation *htop,
struct cfg80211_chan_def *chandef);
void ieee80211_chandef_eht_oper(const struct ieee80211_eht_operation_info *info,
- bool support_160, bool support_320,
struct cfg80211_chan_def *chandef);
-bool ieee80211_chandef_he_6ghz_oper(struct ieee80211_sub_if_data *sdata,
+bool ieee80211_chandef_he_6ghz_oper(struct ieee80211_local *local,
const struct ieee80211_he_operation *he_oper,
const struct ieee80211_eht_operation *eht_oper,
struct cfg80211_chan_def *chandef);
bool ieee80211_chandef_s1g_oper(const struct ieee80211_s1g_oper_ie *oper,
struct cfg80211_chan_def *chandef);
-ieee80211_conn_flags_t ieee80211_chandef_downgrade(struct cfg80211_chan_def *c);
+void ieee80211_chandef_downgrade(struct cfg80211_chan_def *chandef,
+ struct ieee80211_conn_settings *conn);
+static inline void
+ieee80211_chanreq_downgrade(struct ieee80211_chan_req *chanreq,
+ struct ieee80211_conn_settings *conn)
+{
+ ieee80211_chandef_downgrade(&chanreq->oper, conn);
+ if (WARN_ON(!conn))
+ return;
+ if (conn->mode < IEEE80211_CONN_MODE_EHT)
+ chanreq->ap.chan = NULL;
+}
+
+bool ieee80211_chanreq_identical(const struct ieee80211_chan_req *a,
+ const struct ieee80211_chan_req *b);
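
ieee80211_chanreq_downgrade() above wraps the chandef downgrade and additionally drops the EHT-only AP chandef once the connection mode falls below EHT. A rough userspace model of the caller-side downgrade loop follows; the width and mode stepping here is purely illustrative, not the kernel's exact algorithm:

#include <stdbool.h>
#include <stdio.h>

enum width { W20, W40, W80, W160 };
enum mode  { MODE_HT, MODE_VHT, MODE_HE, MODE_EHT };

struct chan_req { enum width oper; bool have_ap_chan; };
struct conn     { enum mode mode; };

static void downgrade(struct chan_req *req, struct conn *conn)
{
	if (req->oper > W20)
		req->oper--;		/* halve the operating width */
	if (req->oper <= W40 && conn->mode > MODE_HT)
		conn->mode = MODE_HT;	/* illustrative mode reduction */
	if (conn->mode < MODE_EHT)
		req->have_ap_chan = false; /* EHT-only AP info no longer valid */
}

int main(void)
{
	struct chan_req req = { W160, true };
	struct conn conn = { MODE_EHT };

	/* caller loop: keep downgrading until a usability check passes */
	while (req.oper > W40)
		downgrade(&req, &conn);

	printf("width=%d mode=%d ap_chan=%d\n",
	       req.oper, conn.mode, req.have_ap_chan);
	return 0;
}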
int __must_check
ieee80211_link_use_channel(struct ieee80211_link_data *link,
- const struct cfg80211_chan_def *chandef,
+ const struct ieee80211_chan_req *req,
enum ieee80211_chanctx_mode mode);
int __must_check
ieee80211_link_reserve_chanctx(struct ieee80211_link_data *link,
- const struct cfg80211_chan_def *chandef,
+ const struct ieee80211_chan_req *req,
enum ieee80211_chanctx_mode mode,
bool radar_required);
int __must_check
@@ -2512,9 +2562,9 @@ ieee80211_link_use_reserved_context(struct ieee80211_link_data *link);
int ieee80211_link_unreserve_chanctx(struct ieee80211_link_data *link);
int __must_check
-ieee80211_link_change_bandwidth(struct ieee80211_link_data *link,
- const struct cfg80211_chan_def *chandef,
- u64 *changed);
+ieee80211_link_change_chanreq(struct ieee80211_link_data *link,
+ const struct ieee80211_chan_req *req,
+ u64 *changed);
void ieee80211_link_release_channel(struct ieee80211_link_data *link);
void ieee80211_link_vlan_copy_chanctx(struct ieee80211_link_data *link);
void ieee80211_link_copy_chanctx_to_vlans(struct ieee80211_link_data *link,
@@ -2561,7 +2611,7 @@ int ieee80211_tdls_channel_switch(struct wiphy *wiphy, struct net_device *dev,
void ieee80211_tdls_cancel_channel_switch(struct wiphy *wiphy,
struct net_device *dev,
const u8 *addr);
-void ieee80211_teardown_tdls_peers(struct ieee80211_sub_if_data *sdata);
+void ieee80211_teardown_tdls_peers(struct ieee80211_link_data *link);
void ieee80211_tdls_handle_disconnect(struct ieee80211_sub_if_data *sdata,
const u8 *peer, u16 reason);
void
@@ -2589,12 +2639,7 @@ u32 ieee80211_calc_expected_tx_airtime(struct ieee80211_hw *hw,
void ieee80211_init_frag_cache(struct ieee80211_fragment_cache *cache);
void ieee80211_destroy_frag_cache(struct ieee80211_fragment_cache *cache);
-u8 ieee80211_ie_len_eht_cap(struct ieee80211_sub_if_data *sdata, u8 iftype);
-u8 *ieee80211_ie_build_eht_cap(u8 *pos,
- const struct ieee80211_sta_he_cap *he_cap,
- const struct ieee80211_sta_eht_cap *eht_cap,
- u8 *end,
- bool for_ap);
+u8 ieee80211_ie_len_eht_cap(struct ieee80211_sub_if_data *sdata);
void
ieee80211_eht_cap_ie_to_sta_eht_cap(struct ieee80211_sub_if_data *sdata,
@@ -2603,6 +2648,12 @@ ieee80211_eht_cap_ie_to_sta_eht_cap(struct ieee80211_sub_if_data *sdata,
const struct ieee80211_eht_cap_elem *eht_cap_ie_elem,
u8 eht_cap_len,
struct link_sta_info *link_sta);
+void ieee80211_process_neg_ttlm_req(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_mgmt *mgmt, size_t len);
+void ieee80211_process_neg_ttlm_res(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_mgmt *mgmt, size_t len);
+int ieee80211_req_neg_ttlm(struct ieee80211_sub_if_data *sdata,
+ struct cfg80211_ttlm_params *params);
void ieee80211_check_wbrf_support(struct ieee80211_local *local);
void ieee80211_add_wbrf(struct ieee80211_local *local, struct cfg80211_chan_def *chandef);
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 11c4caa4748e..395de62d9cb2 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -8,7 +8,7 @@
* Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright (c) 2016 Intel Deutschland GmbH
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
#include <linux/slab.h>
#include <linux/kernel.h>
@@ -511,7 +511,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_do
* would have removed them, but in other modes there shouldn't
* be any stations.
*/
- flushed = sta_info_flush(sdata);
+ flushed = sta_info_flush(sdata, -1);
WARN_ON_ONCE(sdata->vif.type != NL80211_IFTYPE_AP_VLAN && flushed > 0);
/* don't count this interface for allmulti while it is down */
@@ -544,10 +544,10 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_do
sdata->vif.bss_conf.csa_active = false;
if (sdata->vif.type == NL80211_IFTYPE_STATION)
sdata->deflink.u.mgd.csa_waiting_bcn = false;
- if (sdata->deflink.csa_block_tx) {
+ if (sdata->csa_blocked_tx) {
ieee80211_wake_vif_queues(local, sdata,
IEEE80211_QUEUE_STOP_REASON_CSA);
- sdata->deflink.csa_block_tx = false;
+ sdata->csa_blocked_tx = false;
}
wiphy_work_cancel(local->hw.wiphy, &sdata->deflink.csa_finalize_work);
@@ -557,7 +557,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_do
&sdata->deflink.dfs_cac_timer_work);
if (sdata->wdev.cac_started) {
- chandef = sdata->vif.bss_conf.chandef;
+ chandef = sdata->vif.bss_conf.chanreq.oper;
WARN_ON(local->suspended);
ieee80211_link_release_channel(&sdata->deflink);
cfg80211_cac_event(sdata->dev, &chandef,
@@ -1164,7 +1164,7 @@ int ieee80211_add_virtual_monitor(struct ieee80211_local *local)
rcu_assign_pointer(local->monitor_sdata, sdata);
mutex_unlock(&local->iflist_mtx);
- ret = ieee80211_link_use_channel(&sdata->deflink, &local->monitor_chandef,
+ ret = ieee80211_link_use_channel(&sdata->deflink, &local->monitor_chanreq,
IEEE80211_CHANCTX_EXCLUSIVE);
if (ret) {
mutex_lock(&local->iflist_mtx);
@@ -1252,7 +1252,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
sdata->vif.cab_queue = master->vif.cab_queue;
memcpy(sdata->vif.hw_queue, master->vif.hw_queue,
sizeof(sdata->vif.hw_queue));
- sdata->vif.bss_conf.chandef = master->vif.bss_conf.chandef;
+ sdata->vif.bss_conf.chanreq = master->vif.bss_conf.chanreq;
sdata->crypto_tx_tailroom_needed_cnt +=
master->crypto_tx_tailroom_needed_cnt;
@@ -1288,8 +1288,6 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
res = drv_start(local);
if (res)
goto err_del_bss;
- /* we're brought up, everything changes */
- hw_reconf_flags = ~0;
ieee80211_led_radio(local, true);
ieee80211_mod_tpt_led_trig(local,
IEEE80211_TPT_LEDTRIG_FL_RADIO, 0);
@@ -1436,7 +1434,9 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
if (coming_up)
local->open_count++;
- if (hw_reconf_flags)
+ if (local->open_count == 1)
+ ieee80211_hw_conf_init(local);
+ else if (hw_reconf_flags)
ieee80211_hw_config(local, hw_reconf_flags);
ieee80211_recalc_ps(local);
@@ -1546,6 +1546,22 @@ static void ieee80211_iface_process_skb(struct ieee80211_local *local,
default:
break;
}
+ } else if (ieee80211_is_action(mgmt->frame_control) &&
+ mgmt->u.action.category == WLAN_CATEGORY_PROTECTED_EHT) {
+ if (sdata->vif.type == NL80211_IFTYPE_STATION) {
+ switch (mgmt->u.action.u.ttlm_req.action_code) {
+ case WLAN_PROTECTED_EHT_ACTION_TTLM_REQ:
+ ieee80211_process_neg_ttlm_req(sdata, mgmt,
+ skb->len);
+ break;
+ case WLAN_PROTECTED_EHT_ACTION_TTLM_RES:
+ ieee80211_process_neg_ttlm_res(sdata, mgmt,
+ skb->len);
+ break;
+ default:
+ break;
+ }
+ }
} else if (ieee80211_is_ext(mgmt->frame_control)) {
if (sdata->vif.type == NL80211_IFTYPE_STATION)
ieee80211_sta_rx_queued_ext(sdata, skb);
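
The new branch above demultiplexes protected-EHT action frames on their action code into the negotiated-TTLM request and response handlers, and only for station interfaces. A compact userspace sketch of that dispatch shape, with names and values simplified:

#include <stdio.h>

enum { TTLM_REQ, TTLM_RES };

struct mgmt { int action_code; };

static void handle_req(const struct mgmt *m) { printf("req %d\n", m->action_code); }
static void handle_res(const struct mgmt *m) { printf("res %d\n", m->action_code); }

static void dispatch(const struct mgmt *m, int is_station)
{
	if (!is_station)		/* only station interfaces handle these */
		return;

	switch (m->action_code) {
	case TTLM_REQ:
		handle_req(m);
		break;
	case TTLM_RES:
		handle_res(m);
		break;
	default:			/* unknown codes are silently ignored */
		break;
	}
}

int main(void)
{
	struct mgmt m = { TTLM_RES };

	dispatch(&m, 1);
	return 0;
}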
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index af74d7f9d94d..eecdd2265eaa 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -6,7 +6,7 @@
* Copyright 2007-2008 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright 2015-2017 Intel Deutschland GmbH
- * Copyright 2018-2020, 2022-2023 Intel Corporation
+ * Copyright 2018-2020, 2022-2024 Intel Corporation
*/
#include <crypto/utils.h>
@@ -925,6 +925,10 @@ int ieee80211_key_link(struct ieee80211_key *key,
*/
key->color = atomic_inc_return(&key_color);
+ /* keep this flag for easier access later */
+ if (sta && sta->sta.spp_amsdu)
+ key->conf.flags |= IEEE80211_KEY_FLAG_SPP_AMSDU;
+
increment_tailroom_need_count(sdata);
ret = ieee80211_key_replace(sdata, link, sta, pairwise, old_key, key);
@@ -1368,12 +1372,19 @@ EXPORT_SYMBOL_GPL(ieee80211_remove_key);
struct ieee80211_key_conf *
ieee80211_gtk_rekey_add(struct ieee80211_vif *vif,
- struct ieee80211_key_conf *keyconf)
+ struct ieee80211_key_conf *keyconf,
+ int link_id)
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
struct ieee80211_local *local = sdata->local;
struct ieee80211_key *key;
int err;
+ struct ieee80211_link_data *link_data =
+ link_id < 0 ? &sdata->deflink :
+ sdata_dereference(sdata->link[link_id], sdata);
+
+ if (WARN_ON(!link_data))
+ return ERR_PTR(-EINVAL);
if (WARN_ON(!local->wowlan))
return ERR_PTR(-EINVAL);
@@ -1390,8 +1401,9 @@ ieee80211_gtk_rekey_add(struct ieee80211_vif *vif,
if (sdata->u.mgd.mfp != IEEE80211_MFP_DISABLED)
key->conf.flags |= IEEE80211_KEY_FLAG_RX_MGMT;
- /* FIXME: this function needs to get a link ID */
- err = ieee80211_key_link(key, &sdata->deflink, NULL);
+ key->conf.link_id = link_id;
+
+ err = ieee80211_key_link(key, link_data, NULL);
if (err)
return ERR_PTR(err);
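
ieee80211_gtk_rekey_add() now takes a link_id and resolves it with the usual MLO convention: a negative value means "use the default link", anything else must reference an existing link or the call fails. A small userspace model of that lookup, with simplified types:

#include <stdio.h>

#define MAX_LINKS 4

struct link { int id; };
struct iface {
	struct link deflink;
	struct link *links[MAX_LINKS];	/* may contain NULLs */
};

static struct link *resolve_link(struct iface *ifc, int link_id)
{
	if (link_id < 0)
		return &ifc->deflink;
	if (link_id >= MAX_LINKS || !ifc->links[link_id])
		return NULL;		/* caller turns this into -EINVAL */
	return ifc->links[link_id];
}

int main(void)
{
	struct link l1 = { 1 };
	struct iface ifc = { .deflink = { 0 }, .links = { NULL, &l1 } };

	printf("%d\n", resolve_link(&ifc, -1)->id);	/* 0: deflink */
	printf("%p\n", (void *)resolve_link(&ifc, 2));	/* NULL: missing */
	return 0;
}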
diff --git a/net/mac80211/link.c b/net/mac80211/link.c
index d4f86955afa6..685ec66b4264 100644
--- a/net/mac80211/link.c
+++ b/net/mac80211/link.c
@@ -2,7 +2,7 @@
/*
* MLO link handling
*
- * Copyright (C) 2022-2023 Intel Corporation
+ * Copyright (C) 2022-2024 Intel Corporation
*/
#include <linux/slab.h>
#include <linux/kernel.h>
@@ -73,6 +73,8 @@ void ieee80211_link_stop(struct ieee80211_link_data *link)
ieee80211_mgd_stop_link(link);
cancel_delayed_work_sync(&link->color_collision_detect_work);
+ wiphy_work_cancel(link->sdata->local->hw.wiphy,
+ &link->csa_finalize_work);
ieee80211_link_release_channel(link);
}
@@ -354,7 +356,7 @@ static int _ieee80211_set_active_links(struct ieee80211_sub_if_data *sdata,
link = sdata_dereference(sdata->link[link_id], sdata);
- /* FIXME: kill TDLS connections on the link */
+ ieee80211_teardown_tdls_peers(link);
ieee80211_link_release_channel(link);
}
@@ -402,7 +404,8 @@ static int _ieee80211_set_active_links(struct ieee80211_sub_if_data *sdata,
link = sdata_dereference(sdata->link[link_id], sdata);
- ret = ieee80211_link_use_channel(link, &link->conf->chandef,
+ ret = ieee80211_link_use_channel(link,
+ &link->conf->chanreq,
IEEE80211_CHANCTX_SHARED);
WARN_ON_ONCE(ret);
@@ -444,6 +447,9 @@ int ieee80211_set_active_links(struct ieee80211_vif *vif, u16 active_links)
lockdep_assert_wiphy(local->hw.wiphy);
+ if (WARN_ON(!active_links))
+ return -EINVAL;
+
if (!drv_can_activate_links(local, sdata, active_links))
return -EINVAL;
@@ -472,6 +478,9 @@ void ieee80211_set_active_links_async(struct ieee80211_vif *vif,
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+ if (WARN_ON(!active_links))
+ return;
+
if (!ieee80211_sdata_running(sdata))
return;
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index f2ece7793573..4eaea0a9975b 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -93,16 +93,32 @@ static void ieee80211_reconfig_filter(struct wiphy *wiphy,
ieee80211_configure_filter(local);
}
-static u32 ieee80211_hw_conf_chan(struct ieee80211_local *local)
+static u32 ieee80211_calc_hw_conf_chan(struct ieee80211_local *local,
+ struct ieee80211_chanctx_conf *ctx)
{
struct ieee80211_sub_if_data *sdata;
struct cfg80211_chan_def chandef = {};
+ struct cfg80211_chan_def *oper = NULL;
+ enum ieee80211_smps_mode smps_mode = IEEE80211_SMPS_STATIC;
u32 changed = 0;
int power;
u32 offchannel_flag;
+ if (!local->emulate_chanctx)
+ return 0;
+
offchannel_flag = local->hw.conf.flags & IEEE80211_CONF_OFFCHANNEL;
+ if (ctx && !WARN_ON(!ctx->def.chan)) {
+ oper = &ctx->def;
+ if (ctx->rx_chains_static > 1)
+ smps_mode = IEEE80211_SMPS_OFF;
+ else if (ctx->rx_chains_dynamic > 1)
+ smps_mode = IEEE80211_SMPS_DYNAMIC;
+ else
+ smps_mode = IEEE80211_SMPS_STATIC;
+ }
+
if (local->scan_chandef.chan) {
chandef = local->scan_chandef;
} else if (local->tmp_channel) {
@@ -110,25 +126,30 @@ static u32 ieee80211_hw_conf_chan(struct ieee80211_local *local)
chandef.width = NL80211_CHAN_WIDTH_20_NOHT;
chandef.center_freq1 = chandef.chan->center_freq;
chandef.freq1_offset = chandef.chan->freq_offset;
- } else
- chandef = local->_oper_chandef;
+ } else if (oper) {
+ chandef = *oper;
+ } else {
+ chandef = local->dflt_chandef;
+ }
- WARN(!cfg80211_chandef_valid(&chandef),
- "control:%d.%03d MHz width:%d center: %d.%03d/%d MHz",
- chandef.chan->center_freq, chandef.chan->freq_offset,
- chandef.width, chandef.center_freq1, chandef.freq1_offset,
- chandef.center_freq2);
+ if (WARN(!cfg80211_chandef_valid(&chandef),
+ "control:%d.%03d MHz width:%d center: %d.%03d/%d MHz",
+ chandef.chan ? chandef.chan->center_freq : -1,
+ chandef.chan ? chandef.chan->freq_offset : 0,
+ chandef.width, chandef.center_freq1, chandef.freq1_offset,
+ chandef.center_freq2))
+ return 0;
- if (!cfg80211_chandef_identical(&chandef, &local->_oper_chandef))
+ if (!oper || !cfg80211_chandef_identical(&chandef, oper))
local->hw.conf.flags |= IEEE80211_CONF_OFFCHANNEL;
else
local->hw.conf.flags &= ~IEEE80211_CONF_OFFCHANNEL;
offchannel_flag ^= local->hw.conf.flags & IEEE80211_CONF_OFFCHANNEL;
- if (offchannel_flag ||
- !cfg80211_chandef_identical(&local->hw.conf.chandef,
- &local->_oper_chandef)) {
+ /* force it also for scanning, since drivers might config differently */
+ if (offchannel_flag || local->scanning ||
+ !cfg80211_chandef_identical(&local->hw.conf.chandef, &chandef)) {
local->hw.conf.chandef = chandef;
changed |= IEEE80211_CONF_CHANGE_CHANNEL;
}
@@ -140,8 +161,8 @@ static u32 ieee80211_hw_conf_chan(struct ieee80211_local *local)
* that otherwise STATIC is used.
*/
local->hw.conf.smps_mode = IEEE80211_SMPS_STATIC;
- } else if (local->hw.conf.smps_mode != local->smps_mode) {
- local->hw.conf.smps_mode = local->smps_mode;
+ } else if (local->hw.conf.smps_mode != smps_mode) {
+ local->hw.conf.smps_mode = smps_mode;
changed |= IEEE80211_CONF_CHANGE_SMPS;
}
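
With the cached local->smps_mode gone (removed later in this patch), the SMPS mode is recomputed from the channel context's RX chain counts on every pass through ieee80211_calc_hw_conf_chan(). A runnable userspace sketch of just that mapping, using stand-in names:

#include <stdio.h>

enum smps { SMPS_STATIC, SMPS_DYNAMIC, SMPS_OFF };

static enum smps smps_from_chains(int rx_static, int rx_dynamic)
{
	if (rx_static > 1)
		return SMPS_OFF;	/* several chains always listening */
	if (rx_dynamic > 1)
		return SMPS_DYNAMIC;	/* extra chains woken on demand */
	return SMPS_STATIC;		/* single-chain receive */
}

int main(void)
{
	printf("%d %d %d\n",
	       smps_from_chains(2, 2), smps_from_chains(1, 2),
	       smps_from_chains(1, 1));
	return 0;
}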
@@ -173,12 +194,9 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
might_sleep();
- if (!local->use_chanctx)
- changed |= ieee80211_hw_conf_chan(local);
- else
- changed &= ~(IEEE80211_CONF_CHANGE_CHANNEL |
- IEEE80211_CONF_CHANGE_POWER |
- IEEE80211_CONF_CHANGE_SMPS);
+ WARN_ON(changed & (IEEE80211_CONF_CHANGE_CHANNEL |
+ IEEE80211_CONF_CHANGE_POWER |
+ IEEE80211_CONF_CHANGE_SMPS));
if (changed && local->open_count) {
ret = drv_config(local, changed);
@@ -202,13 +220,115 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
return ret;
}
+/* for scanning, offchannel and chanctx emulation only */
+static int _ieee80211_hw_conf_chan(struct ieee80211_local *local,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ u32 changed;
+
+ if (!local->open_count)
+ return 0;
+
+ changed = ieee80211_calc_hw_conf_chan(local, ctx);
+ if (!changed)
+ return 0;
+
+ return drv_config(local, changed);
+}
+
+int ieee80211_hw_conf_chan(struct ieee80211_local *local)
+{
+ struct ieee80211_chanctx *ctx;
+
+ ctx = list_first_entry_or_null(&local->chanctx_list,
+ struct ieee80211_chanctx,
+ list);
+
+ return _ieee80211_hw_conf_chan(local, ctx ? &ctx->conf : NULL);
+}
+
+void ieee80211_hw_conf_init(struct ieee80211_local *local)
+{
+ u32 changed = ~(IEEE80211_CONF_CHANGE_CHANNEL |
+ IEEE80211_CONF_CHANGE_POWER |
+ IEEE80211_CONF_CHANGE_SMPS);
+
+ if (WARN_ON(!local->open_count))
+ return;
+
+ if (local->emulate_chanctx) {
+ struct ieee80211_chanctx *ctx;
+
+ ctx = list_first_entry_or_null(&local->chanctx_list,
+ struct ieee80211_chanctx,
+ list);
+
+ changed |= ieee80211_calc_hw_conf_chan(local,
+ ctx ? &ctx->conf : NULL);
+ }
+
+ WARN_ON(drv_config(local, changed));
+}
+
+int ieee80211_emulate_add_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct ieee80211_local *local = hw_to_local(hw);
+
+ local->hw.conf.radar_enabled = ctx->radar_enabled;
+
+ return _ieee80211_hw_conf_chan(local, ctx);
+}
+EXPORT_SYMBOL(ieee80211_emulate_add_chanctx);
+
+void ieee80211_emulate_remove_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct ieee80211_local *local = hw_to_local(hw);
+
+ local->hw.conf.radar_enabled = false;
+
+ _ieee80211_hw_conf_chan(local, NULL);
+}
+EXPORT_SYMBOL(ieee80211_emulate_remove_chanctx);
+
+void ieee80211_emulate_change_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx,
+ u32 changed)
+{
+ struct ieee80211_local *local = hw_to_local(hw);
+
+ local->hw.conf.radar_enabled = ctx->radar_enabled;
+
+ _ieee80211_hw_conf_chan(local, ctx);
+}
+EXPORT_SYMBOL(ieee80211_emulate_change_chanctx);
+
+int ieee80211_emulate_switch_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif_chanctx_switch *vifs,
+ int n_vifs,
+ enum ieee80211_chanctx_switch_mode mode)
+{
+ struct ieee80211_local *local = hw_to_local(hw);
+
+ if (n_vifs <= 0)
+ return -EINVAL;
+
+ local->hw.conf.radar_enabled = vifs[0].new_ctx->radar_enabled;
+ _ieee80211_hw_conf_chan(local, vifs[0].new_ctx);
+
+ return 0;
+}
+EXPORT_SYMBOL(ieee80211_emulate_switch_vif_chanctx);
+
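
Driver-side, the intent is that non-chanctx drivers plug these exported helpers into their ops table rather than leaving the chanctx hooks NULL. A hedged sketch of what such an ops table might look like; it is not compilable standalone, and the mydrv_* handlers are hypothetical names, not from this patch:

static const struct ieee80211_ops mydrv_ops = {
	.tx			= mydrv_tx,
	.start			= mydrv_start,
	.stop			= mydrv_stop,
	.config			= mydrv_config,
	.add_interface		= mydrv_add_interface,
	.remove_interface	= mydrv_remove_interface,
	/* chanctx emulation: mac80211 derives hw.conf from these */
	.add_chanctx		= ieee80211_emulate_add_chanctx,
	.remove_chanctx		= ieee80211_emulate_remove_chanctx,
	.change_chanctx		= ieee80211_emulate_change_chanctx,
	.switch_vif_chanctx	= ieee80211_emulate_switch_vif_chanctx,
	/* note: no assign/unassign_vif_chanctx in the emulated case */
};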
#define BSS_CHANGED_VIF_CFG_FLAGS (BSS_CHANGED_ASSOC |\
BSS_CHANGED_IDLE |\
BSS_CHANGED_PS |\
BSS_CHANGED_IBSS |\
BSS_CHANGED_ARP_FILTER |\
BSS_CHANGED_SSID |\
- BSS_CHANGED_MLD_VALID_LINKS)
+ BSS_CHANGED_MLD_VALID_LINKS |\
+ BSS_CHANGED_MLD_TTLM)
void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
u64 changed)
@@ -644,7 +764,7 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
struct ieee80211_local *local;
int priv_size, i;
struct wiphy *wiphy;
- bool use_chanctx;
+ bool emulate_chanctx;
if (WARN_ON(!ops->tx || !ops->start || !ops->stop || !ops->config ||
!ops->add_interface || !ops->remove_interface ||
@@ -659,12 +779,26 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
return NULL;
/* check all or no channel context operations exist */
- i = !!ops->add_chanctx + !!ops->remove_chanctx +
- !!ops->change_chanctx + !!ops->assign_vif_chanctx +
- !!ops->unassign_vif_chanctx;
- if (WARN_ON(i != 0 && i != 5))
- return NULL;
- use_chanctx = i == 5;
+ if (ops->add_chanctx == ieee80211_emulate_add_chanctx &&
+ ops->remove_chanctx == ieee80211_emulate_remove_chanctx &&
+ ops->change_chanctx == ieee80211_emulate_change_chanctx) {
+ if (WARN_ON(ops->assign_vif_chanctx ||
+ ops->unassign_vif_chanctx))
+ return NULL;
+ emulate_chanctx = true;
+ } else {
+ if (WARN_ON(ops->add_chanctx == ieee80211_emulate_add_chanctx ||
+ ops->remove_chanctx == ieee80211_emulate_remove_chanctx ||
+ ops->change_chanctx == ieee80211_emulate_change_chanctx))
+ return NULL;
+ if (WARN_ON(!ops->add_chanctx ||
+ !ops->remove_chanctx ||
+ !ops->change_chanctx ||
+ !ops->assign_vif_chanctx ||
+ !ops->unassign_vif_chanctx))
+ return NULL;
+ emulate_chanctx = false;
+ }
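
Registration now infers emulation by comparing the driver's ops pointers against the exported helpers, replacing the old "all five chanctx ops or none" counting check. A userspace model of that classification, simplified to three ops:

#include <stdio.h>

struct ops {
	void (*add)(void);
	void (*remove)(void);
	void (*assign)(void);
};

static void emulate_add(void)    { }
static void emulate_remove(void) { }
static void real_add(void)       { }
static void real_remove(void)    { }
static void real_assign(void)    { }

/* 1 = emulation, 0 = real chanctx support, -1 = invalid mix */
static int classify(const struct ops *o)
{
	if (o->add == emulate_add && o->remove == emulate_remove) {
		if (o->assign)		/* emulated drivers must not assign */
			return -1;
		return 1;
	}
	if (o->add == emulate_add || o->remove == emulate_remove)
		return -1;		/* partial emulation is a bug */
	if (!o->add || !o->remove || !o->assign)
		return -1;		/* real support must be complete */
	return 0;
}

int main(void)
{
	struct ops emu  = { emulate_add, emulate_remove, NULL };
	struct ops real = { real_add, real_remove, real_assign };

	printf("emu=%d real=%d\n", classify(&emu), classify(&real));
	return 0;
}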
/* Ensure 32-byte alignment of our private data and hw private data.
* We use the wiphy priv data for both our ieee80211_local and for
@@ -698,7 +832,7 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
WIPHY_FLAG_REPORTS_OBSS |
WIPHY_FLAG_OFFCHAN_TX;
- if (!use_chanctx || ops->remain_on_channel)
+ if (emulate_chanctx || ops->remain_on_channel)
wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
wiphy->features |= NL80211_FEATURE_SK_TX_STATUS |
@@ -734,8 +868,11 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
NL80211_EXT_FEATURE_SCAN_MIN_PREQ_CONTENT);
}
- if (!ops->set_key)
+ if (!ops->set_key) {
wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
+ wiphy_ext_feature_set(wiphy,
+ NL80211_EXT_FEATURE_SPP_AMSDU_SUPPORT);
+ }
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_TXQS);
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_RRM);
@@ -752,7 +889,10 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
local->hw.priv = (char *)local + ALIGN(sizeof(*local), NETDEV_ALIGN);
local->ops = ops;
- local->use_chanctx = use_chanctx;
+ local->emulate_chanctx = emulate_chanctx;
+
+ if (emulate_chanctx)
+ ieee80211_hw_set(&local->hw, CHANCTX_STA_CSA);
/*
* We need a bit of data queued to build aggregates properly, so
@@ -829,7 +969,6 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
ieee80211_dfs_radar_detected_work);
wiphy_work_init(&local->reconfig_filter, ieee80211_reconfig_filter);
- local->smps_mode = IEEE80211_SMPS_OFF;
wiphy_work_init(&local->dynamic_ps_enable_work,
ieee80211_dynamic_ps_enable_work);
@@ -980,7 +1119,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
* as much, e.g. monitoring beacons would be hard if we
* might not even know which link is active at which time.
*/
- if (WARN_ON(!local->use_chanctx))
+ if (WARN_ON(local->emulate_chanctx))
return -EINVAL;
if (WARN_ON(!local->ops->link_info_changed))
@@ -1024,7 +1163,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
return -EINVAL;
#endif
- if (!local->use_chanctx) {
+ if (local->emulate_chanctx) {
for (i = 0; i < local->hw.wiphy->n_iface_combinations; i++) {
const struct ieee80211_iface_combination *comb;
@@ -1090,11 +1229,11 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
&sband->channels[i],
NL80211_CHAN_NO_HT);
/* init channel we're on */
- if (!local->use_chanctx && !local->_oper_chandef.chan) {
+ local->monitor_chanreq.oper = dflt_chandef;
+ if (local->emulate_chanctx) {
+ local->dflt_chandef = dflt_chandef;
local->hw.conf.chandef = dflt_chandef;
- local->_oper_chandef = dflt_chandef;
}
- local->monitor_chandef = dflt_chandef;
}
channels += sband->n_channels;
@@ -1115,8 +1254,26 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
supp_vht = supp_vht || sband->vht_cap.vht_supported;
for_each_sband_iftype_data(sband, i, iftd) {
+ u8 he_40_mhz_cap;
+
supp_he = supp_he || iftd->he_cap.has_he;
supp_eht = supp_eht || iftd->eht_cap.has_eht;
+
+ if (band == NL80211_BAND_2GHZ)
+ he_40_mhz_cap =
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G;
+ else
+ he_40_mhz_cap =
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G;
+
+ /* currently no support for HE client where HT has 40 MHz but not HE */
+ if (iftd->he_cap.has_he &&
+ iftd->types_mask & (BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT)) &&
+ sband->ht_cap.ht_supported &&
+ sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 &&
+ !(iftd->he_cap.he_cap_elem.phy_cap_info[0] & he_40_mhz_cap))
+ return -EINVAL;
}
/* HT, VHT, HE require QoS, thus >= 4 queues */
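
The new check above rejects, at registration time, a HE client band where HT advertises 40 MHz support but the HE PHY capabilities do not. A self-contained userspace sketch of the same consistency rule; the flag values are illustrative, not the real IEEE80211_* bits:

#include <stdbool.h>
#include <stdio.h>

#define HT_CAP_40MHZ	(1U << 1)
#define HE_PHY_40MHZ	(1U << 1)	/* a per-band bit in reality */

struct band {
	bool has_he;
	unsigned int ht_cap;
	unsigned int he_phy_cap0;
};

static int validate_band(const struct band *b, bool is_client)
{
	if (b->has_he && is_client &&
	    (b->ht_cap & HT_CAP_40MHZ) &&
	    !(b->he_phy_cap0 & HE_PHY_40MHZ))
		return -1;	/* -EINVAL in the kernel: HT has 40 MHz, HE doesn't */
	return 0;
}

int main(void)
{
	struct band bad = { true, HT_CAP_40MHZ, 0 };
	struct band ok  = { true, HT_CAP_40MHZ, HE_PHY_40MHZ };

	printf("bad=%d ok=%d\n", validate_band(&bad, true),
	       validate_band(&ok, true));
	return 0;
}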
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index fccbcde3359a..32475da98d73 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2008, 2009 open80211s Ltd.
- * Copyright (C) 2018 - 2023 Intel Corporation
+ * Copyright (C) 2018 - 2024 Intel Corporation
* Authors: Luis Carlos Cobo <luisca@cozybit.com>
* Javier Cardona <javier@cozybit.com>
*/
@@ -97,7 +97,7 @@ bool mesh_matches_local(struct ieee80211_sub_if_data *sdata,
if (sdata->vif.bss_conf.basic_rates != basic_rates)
return false;
- cfg80211_chandef_create(&sta_chan_def, sdata->vif.bss_conf.chandef.chan,
+ cfg80211_chandef_create(&sta_chan_def, sdata->vif.bss_conf.chanreq.oper.chan,
NL80211_CHAN_NO_HT);
ieee80211_chandef_ht_oper(ie->ht_operation, &sta_chan_def);
@@ -107,10 +107,11 @@ bool mesh_matches_local(struct ieee80211_sub_if_data *sdata,
ieee80211_chandef_vht_oper(&sdata->local->hw, vht_cap_info,
ie->vht_operation, ie->ht_operation,
&sta_chan_def);
- ieee80211_chandef_he_6ghz_oper(sdata, ie->he_operation, ie->eht_operation,
+ ieee80211_chandef_he_6ghz_oper(sdata->local, ie->he_operation,
+ ie->eht_operation,
&sta_chan_def);
- if (!cfg80211_chandef_compatible(&sdata->vif.bss_conf.chandef,
+ if (!cfg80211_chandef_compatible(&sdata->vif.bss_conf.chanreq.oper,
&sta_chan_def))
return false;
@@ -435,9 +436,9 @@ int mesh_add_ht_cap_ie(struct ieee80211_sub_if_data *sdata,
return 0;
if (!sband->ht_cap.ht_supported ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_5 ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_10)
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_20_NOHT ||
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_5 ||
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_10)
return 0;
if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_ht_cap))
@@ -476,16 +477,16 @@ int mesh_add_ht_oper_ie(struct ieee80211_sub_if_data *sdata,
return 0;
if (!ht_cap->ht_supported ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_5 ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_10)
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_20_NOHT ||
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_5 ||
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_10)
return 0;
if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_ht_operation))
return -ENOMEM;
pos = skb_put(skb, 2 + sizeof(struct ieee80211_ht_operation));
- ieee80211_ie_build_ht_oper(pos, ht_cap, &sdata->vif.bss_conf.chandef,
+ ieee80211_ie_build_ht_oper(pos, ht_cap, &sdata->vif.bss_conf.chanreq.oper,
sdata->vif.bss_conf.ht_operation_mode,
false);
@@ -507,9 +508,9 @@ int mesh_add_vht_cap_ie(struct ieee80211_sub_if_data *sdata,
return 0;
if (!sband->vht_cap.vht_supported ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_5 ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_10)
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_20_NOHT ||
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_5 ||
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_10)
return 0;
if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_vht_cap))
@@ -548,9 +549,9 @@ int mesh_add_vht_oper_ie(struct ieee80211_sub_if_data *sdata,
return 0;
if (!vht_cap->vht_supported ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_5 ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_10)
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_20_NOHT ||
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_5 ||
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_10)
return 0;
if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_vht_operation))
@@ -558,7 +559,7 @@ int mesh_add_vht_oper_ie(struct ieee80211_sub_if_data *sdata,
pos = skb_put(skb, 2 + sizeof(struct ieee80211_vht_operation));
ieee80211_ie_build_vht_oper(pos, vht_cap,
- &sdata->vif.bss_conf.chandef);
+ &sdata->vif.bss_conf.chanreq.oper);
return 0;
}
@@ -566,29 +567,18 @@ int mesh_add_vht_oper_ie(struct ieee80211_sub_if_data *sdata,
int mesh_add_he_cap_ie(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb, u8 ie_len)
{
- const struct ieee80211_sta_he_cap *he_cap;
struct ieee80211_supported_band *sband;
- u8 *pos;
sband = ieee80211_get_sband(sdata);
if (!sband)
return -EINVAL;
- he_cap = ieee80211_get_he_iftype_cap(sband, NL80211_IFTYPE_MESH_POINT);
-
- if (!he_cap ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_5 ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_10)
+ if (sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_20_NOHT ||
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_5 ||
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_10)
return 0;
- if (skb_tailroom(skb) < ie_len)
- return -ENOMEM;
-
- pos = skb_put(skb, ie_len);
- ieee80211_ie_build_he_cap(0, pos, he_cap, pos + ie_len);
-
- return 0;
+ return ieee80211_put_he_cap(skb, sdata, sband, NULL);
}
int mesh_add_he_oper_ie(struct ieee80211_sub_if_data *sdata,
@@ -605,20 +595,20 @@ int mesh_add_he_oper_ie(struct ieee80211_sub_if_data *sdata,
he_cap = ieee80211_get_he_iftype_cap(sband, NL80211_IFTYPE_MESH_POINT);
if (!he_cap ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_5 ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_10)
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_20_NOHT ||
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_5 ||
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_10)
return 0;
len = 2 + 1 + sizeof(struct ieee80211_he_operation);
- if (sdata->vif.bss_conf.chandef.chan->band == NL80211_BAND_6GHZ)
+ if (sdata->vif.bss_conf.chanreq.oper.chan->band == NL80211_BAND_6GHZ)
len += sizeof(struct ieee80211_he_6ghz_oper);
if (skb_tailroom(skb) < len)
return -ENOMEM;
pos = skb_put(skb, len);
- ieee80211_ie_build_he_oper(pos, &sdata->vif.bss_conf.chandef);
+ ieee80211_ie_build_he_oper(pos, &sdata->vif.bss_conf.chanreq.oper);
return 0;
}
@@ -639,37 +629,25 @@ int mesh_add_he_6ghz_cap_ie(struct ieee80211_sub_if_data *sdata,
if (!iftd)
return 0;
- ieee80211_ie_build_he_6ghz_cap(sdata, sdata->deflink.smps_mode, skb);
+ ieee80211_put_he_6ghz_cap(skb, sdata, sdata->deflink.smps_mode);
return 0;
}
int mesh_add_eht_cap_ie(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb, u8 ie_len)
{
- const struct ieee80211_sta_he_cap *he_cap;
- const struct ieee80211_sta_eht_cap *eht_cap;
struct ieee80211_supported_band *sband;
- u8 *pos;
sband = ieee80211_get_sband(sdata);
if (!sband)
return -EINVAL;
- he_cap = ieee80211_get_he_iftype_cap(sband, NL80211_IFTYPE_MESH_POINT);
- eht_cap = ieee80211_get_eht_iftype_cap(sband, NL80211_IFTYPE_MESH_POINT);
- if (!he_cap || !eht_cap ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_5 ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_10)
+ if (sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_20_NOHT ||
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_5 ||
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_10)
return 0;
- if (skb_tailroom(skb) < ie_len)
- return -ENOMEM;
-
- pos = skb_put(skb, ie_len);
- ieee80211_ie_build_eht_cap(pos, he_cap, eht_cap, pos + ie_len, false);
-
- return 0;
+ return ieee80211_put_eht_cap(skb, sdata, sband, NULL);
}
int mesh_add_eht_oper_ie(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
@@ -685,9 +663,9 @@ int mesh_add_eht_oper_ie(struct ieee80211_sub_if_data *sdata, struct sk_buff *sk
eht_cap = ieee80211_get_eht_iftype_cap(sband, NL80211_IFTYPE_MESH_POINT);
if (!eht_cap ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_5 ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_10)
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_20_NOHT ||
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_5 ||
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_10)
return 0;
len = 2 + 1 + offsetof(struct ieee80211_eht_operation, optional) +
@@ -697,7 +675,7 @@ int mesh_add_eht_oper_ie(struct ieee80211_sub_if_data *sdata, struct sk_buff *sk
return -ENOMEM;
pos = skb_put(skb, len);
- ieee80211_ie_build_eht_oper(pos, &sdata->vif.bss_conf.chandef, eht_cap);
+ ieee80211_ie_build_eht_oper(pos, &sdata->vif.bss_conf.chanreq.oper, eht_cap);
return 0;
}
@@ -745,9 +723,9 @@ ieee80211_mesh_update_bss_params(struct ieee80211_sub_if_data *sdata,
return;
if (!ieee80211_get_he_iftype_cap(sband, NL80211_IFTYPE_MESH_POINT) ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_5 ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_10)
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_20_NOHT ||
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_5 ||
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_10)
return;
sdata->vif.bss_conf.he_support = true;
@@ -966,24 +944,22 @@ ieee80211_mesh_build_beacon(struct ieee80211_if_mesh *ifmsh)
int head_len, tail_len;
struct sk_buff *skb;
struct ieee80211_mgmt *mgmt;
- struct ieee80211_chanctx_conf *chanctx_conf;
struct mesh_csa_settings *csa;
- enum nl80211_band band;
+ const struct ieee80211_supported_band *sband;
u8 ie_len_he_cap, ie_len_eht_cap;
u8 *pos;
struct ieee80211_sub_if_data *sdata;
int hdr_len = offsetofend(struct ieee80211_mgmt, u.beacon);
+ u32 rate_flags;
sdata = container_of(ifmsh, struct ieee80211_sub_if_data, u.mesh);
- rcu_read_lock();
- chanctx_conf = rcu_dereference(sdata->vif.bss_conf.chanctx_conf);
- band = chanctx_conf->def.chan->band;
- rcu_read_unlock();
- ie_len_he_cap = ieee80211_ie_len_he_cap(sdata,
- NL80211_IFTYPE_MESH_POINT);
- ie_len_eht_cap = ieee80211_ie_len_eht_cap(sdata,
- NL80211_IFTYPE_MESH_POINT);
+ sband = ieee80211_get_sband(sdata);
+ rate_flags =
+ ieee80211_chandef_rate_flags(&sdata->vif.bss_conf.chanreq.oper);
+
+ ie_len_he_cap = ieee80211_ie_len_he_cap(sdata);
+ ie_len_eht_cap = ieee80211_ie_len_eht_cap(sdata);
head_len = hdr_len +
2 + /* NULL SSID */
/* Channel Switch Announcement */
@@ -1107,7 +1083,9 @@ ieee80211_mesh_build_beacon(struct ieee80211_if_mesh *ifmsh)
}
rcu_read_unlock();
- if (ieee80211_add_srates_ie(sdata, skb, true, band) ||
+ if (ieee80211_put_srates_elem(skb, sband,
+ sdata->vif.bss_conf.basic_rates,
+ rate_flags, 0, WLAN_EID_SUPP_RATES) ||
mesh_add_ds_params_ie(sdata, skb))
goto out_free;
@@ -1118,7 +1096,9 @@ ieee80211_mesh_build_beacon(struct ieee80211_if_mesh *ifmsh)
skb_trim(skb, 0);
bcn->tail = bcn->head + bcn->head_len;
- if (ieee80211_add_ext_srates_ie(sdata, skb, true, band) ||
+ if (ieee80211_put_srates_elem(skb, sband,
+ sdata->vif.bss_conf.basic_rates,
+ rate_flags, 0, WLAN_EID_EXT_SUPP_RATES) ||
mesh_add_rsn_ie(sdata, skb) ||
mesh_add_ht_cap_ie(sdata, skb) ||
mesh_add_ht_oper_ie(sdata, skb) ||
@@ -1234,7 +1214,7 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
netif_carrier_off(sdata->dev);
/* flush STAs and mpaths on this iface */
- sta_info_flush(sdata);
+ sta_info_flush(sdata, -1);
ieee80211_free_keys(sdata, true);
mesh_path_flush_by_iface(sdata);
@@ -1276,11 +1256,12 @@ static void ieee80211_mesh_csa_mark_radar(struct ieee80211_sub_if_data *sdata)
* unavailable.
*/
err = cfg80211_chandef_dfs_required(sdata->local->hw.wiphy,
- &sdata->vif.bss_conf.chandef,
+ &sdata->vif.bss_conf.chanreq.oper,
NL80211_IFTYPE_MESH_POINT);
if (err > 0)
cfg80211_radar_event(sdata->local->hw.wiphy,
- &sdata->vif.bss_conf.chandef, GFP_ATOMIC);
+ &sdata->vif.bss_conf.chanreq.oper,
+ GFP_ATOMIC);
}
static bool
@@ -1292,7 +1273,7 @@ ieee80211_mesh_process_chnswitch(struct ieee80211_sub_if_data *sdata,
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
struct ieee80211_supported_band *sband;
int err;
- ieee80211_conn_flags_t conn_flags = 0;
+ struct ieee80211_conn_settings conn = ieee80211_conn_settings_unlimited;
u32 vht_cap_info = 0;
lockdep_assert_wiphy(sdata->local->hw.wiphy);
@@ -1301,15 +1282,18 @@ ieee80211_mesh_process_chnswitch(struct ieee80211_sub_if_data *sdata,
if (!sband)
return false;
- switch (sdata->vif.bss_conf.chandef.width) {
+ switch (sdata->vif.bss_conf.chanreq.oper.width) {
case NL80211_CHAN_WIDTH_20_NOHT:
- conn_flags |= IEEE80211_CONN_DISABLE_HT;
- fallthrough;
+ conn.mode = IEEE80211_CONN_MODE_LEGACY;
+ conn.bw_limit = IEEE80211_CONN_BW_LIMIT_20;
+ break;
case NL80211_CHAN_WIDTH_20:
- conn_flags |= IEEE80211_CONN_DISABLE_40MHZ;
- fallthrough;
+ conn.mode = IEEE80211_CONN_MODE_HT;
+ conn.bw_limit = IEEE80211_CONN_BW_LIMIT_20;
+ break;
case NL80211_CHAN_WIDTH_40:
- conn_flags |= IEEE80211_CONN_DISABLE_VHT;
+ conn.mode = IEEE80211_CONN_MODE_HT;
+ conn.bw_limit = IEEE80211_CONN_BW_LIMIT_40;
break;
default:
break;
@@ -1321,8 +1305,8 @@ ieee80211_mesh_process_chnswitch(struct ieee80211_sub_if_data *sdata,
memset(&params, 0, sizeof(params));
err = ieee80211_parse_ch_switch_ie(sdata, elems, sband->band,
- vht_cap_info,
- conn_flags, sdata->vif.addr,
+ vht_cap_info, &conn,
+ sdata->vif.addr,
&csa_ie);
if (err < 0)
return false;
@@ -1335,7 +1319,7 @@ ieee80211_mesh_process_chnswitch(struct ieee80211_sub_if_data *sdata,
if (csa_ie.reason_code == WLAN_REASON_MESH_CHAN_REGULATORY)
ieee80211_mesh_csa_mark_radar(sdata);
- params.chandef = csa_ie.chandef;
+ params.chandef = csa_ie.chanreq.oper;
params.count = csa_ie.count;
if (!cfg80211_chandef_usable(sdata->local->hw.wiphy, &params.chandef,
@@ -1371,7 +1355,7 @@ ieee80211_mesh_process_chnswitch(struct ieee80211_sub_if_data *sdata,
params.radar_required = err;
if (cfg80211_chandef_identical(&params.chandef,
- &sdata->vif.bss_conf.chandef)) {
+ &sdata->vif.bss_conf.chanreq.oper)) {
mcsa_dbg(sdata,
"received csa with an identical chandef, ignoring\n");
return true;
@@ -1551,7 +1535,7 @@ int ieee80211_mesh_finish_csa(struct ieee80211_sub_if_data *sdata, u64 *changed)
*changed |= BSS_CHANGED_BEACON;
mcsa_dbg(sdata, "complete switching to center freq %d MHz",
- sdata->vif.bss_conf.chandef.chan->center_freq);
+ sdata->vif.bss_conf.chanreq.oper.chan->center_freq);
return 0;
}
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index ad8469293d71..d913ce7ba72e 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2008, 2009 open80211s Ltd.
- * Copyright (C) 2023 Intel Corporation
+ * Copyright (C) 2023-2024 Intel Corporation
* Authors: Luis Carlos Cobo <luisca@cozybit.com>
* Javier Cardona <javier@cozybit.com>
*/
@@ -94,6 +94,7 @@ enum mesh_deferred_task_flags {
* @is_root: the destination station of this path is a root node
* @is_gate: the destination station of this path is a mesh gate
* @path_change_count: the number of path changes to destination
+ * @fast_tx_check: timestamp of last fast-xmit enable attempt
*
*
* The dst address is unique in the mesh path table. Since the mesh_path is
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 735edde1bd81..91b55d6a68b9 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -600,11 +600,10 @@ unlock_sta:
void mesh_fast_tx_gc(struct ieee80211_sub_if_data *sdata)
{
unsigned long timeout = msecs_to_jiffies(MESH_FAST_TX_CACHE_TIMEOUT);
- struct mesh_tx_cache *cache;
+ struct mesh_tx_cache *cache = &sdata->u.mesh.tx_cache;
struct ieee80211_mesh_fast_tx *entry;
struct hlist_node *n;
- cache = &sdata->u.mesh.tx_cache;
if (atomic_read(&cache->rht.nelems) < MESH_FAST_TX_CACHE_THRESHOLD_SIZE)
return;
@@ -622,7 +621,6 @@ void mesh_fast_tx_flush_mpath(struct mesh_path *mpath)
struct ieee80211_mesh_fast_tx *entry;
struct hlist_node *n;
- cache = &sdata->u.mesh.tx_cache;
spin_lock_bh(&cache->walk_lock);
hlist_for_each_entry_safe(entry, n, &cache->walk_head, walk_list)
if (entry->mpath == mpath)
@@ -637,7 +635,6 @@ void mesh_fast_tx_flush_sta(struct ieee80211_sub_if_data *sdata,
struct ieee80211_mesh_fast_tx *entry;
struct hlist_node *n;
- cache = &sdata->u.mesh.tx_cache;
spin_lock_bh(&cache->walk_lock);
hlist_for_each_entry_safe(entry, n, &cache->walk_head, walk_list)
if (rcu_access_pointer(entry->mpath->next_hop) == sta)
@@ -651,7 +648,6 @@ void mesh_fast_tx_flush_addr(struct ieee80211_sub_if_data *sdata,
struct mesh_tx_cache *cache = &sdata->u.mesh.tx_cache;
struct ieee80211_mesh_fast_tx *entry;
- cache = &sdata->u.mesh.tx_cache;
spin_lock_bh(&cache->walk_lock);
entry = rhashtable_lookup_fast(&cache->rht, addr, fast_tx_rht_params);
if (entry)
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 28bf794f67f8..8f2b492a9fe9 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2008, 2009 open80211s Ltd.
- * Copyright (C) 2019, 2021-2023 Intel Corporation
+ * Copyright (C) 2019, 2021-2024 Intel Corporation
* Author: Luis Carlos Cobo <luisca@cozybit.com>
*/
#include <linux/gfp.h>
@@ -163,7 +163,7 @@ static u64 mesh_set_ht_prot_mode(struct ieee80211_sub_if_data *sdata)
u16 ht_opmode;
bool non_ht_sta = false, ht20_sta = false;
- switch (sdata->vif.bss_conf.chandef.width) {
+ switch (sdata->vif.bss_conf.chanreq.oper.width) {
case NL80211_CHAN_WIDTH_20_NOHT:
case NL80211_CHAN_WIDTH_5:
case NL80211_CHAN_WIDTH_10:
@@ -196,7 +196,7 @@ static u64 mesh_set_ht_prot_mode(struct ieee80211_sub_if_data *sdata)
if (non_ht_sta)
ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED;
else if (ht20_sta &&
- sdata->vif.bss_conf.chandef.width > NL80211_CHAN_WIDTH_20)
+ sdata->vif.bss_conf.chanreq.oper.width > NL80211_CHAN_WIDTH_20)
ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_20MHZ;
else
ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_NONE;
@@ -226,10 +226,8 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
int hdr_len = offsetofend(struct ieee80211_mgmt, u.action.u.self_prot);
int err = -ENOMEM;
- ie_len_he_cap = ieee80211_ie_len_he_cap(sdata,
- NL80211_IFTYPE_MESH_POINT);
- ie_len_eht_cap = ieee80211_ie_len_eht_cap(sdata,
- NL80211_IFTYPE_MESH_POINT);
+ ie_len_he_cap = ieee80211_ie_len_he_cap(sdata);
+ ie_len_eht_cap = ieee80211_ie_len_eht_cap(sdata);
skb = dev_alloc_skb(local->tx_headroom +
hdr_len +
2 + /* capability info */
@@ -266,14 +264,13 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
if (action != WLAN_SP_MESH_PEERING_CLOSE) {
struct ieee80211_supported_band *sband;
- enum nl80211_band band;
+ u32 rate_flags, basic_rates;
sband = ieee80211_get_sband(sdata);
if (!sband) {
err = -EINVAL;
goto free;
}
- band = sband->band;
/* capability info */
pos = skb_put_zero(skb, 2);
@@ -282,8 +279,17 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
pos = skb_put(skb, 2);
put_unaligned_le16(sta->sta.aid, pos);
}
- if (ieee80211_add_srates_ie(sdata, skb, true, band) ||
- ieee80211_add_ext_srates_ie(sdata, skb, true, band) ||
+
+ rate_flags =
+ ieee80211_chandef_rate_flags(&sdata->vif.bss_conf.chanreq.oper);
+ basic_rates = sdata->vif.bss_conf.basic_rates;
+
+ if (ieee80211_put_srates_elem(skb, sband, basic_rates,
+ rate_flags, 0,
+ WLAN_EID_SUPP_RATES) ||
+ ieee80211_put_srates_elem(skb, sband, basic_rates,
+ rate_flags, 0,
+ WLAN_EID_EXT_SUPP_RATES) ||
mesh_add_rsn_ie(sdata, skb) ||
mesh_add_meshid_ie(sdata, skb) ||
mesh_add_meshconf_ie(sdata, skb))
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 2022a26eb881..47a2cba8313f 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -46,6 +46,8 @@
#define IEEE80211_ADV_TTLM_SAFETY_BUFFER_MS msecs_to_jiffies(100)
#define IEEE80211_ADV_TTLM_ST_UNDERFLOW 0xff00
+#define IEEE80211_NEG_TTLM_REQ_TIMEOUT (HZ / 5)
+
static int max_nullfunc_tries = 2;
module_param(max_nullfunc_tries, int, 0644);
MODULE_PARM_DESC(max_nullfunc_tries,
@@ -92,84 +94,6 @@ MODULE_PARM_DESC(probe_wait_ms,
#define IEEE80211_SIGNAL_AVE_MIN_COUNT 4
/*
- * Extract from the given disabled subchannel bitmap (raw format
- * from the EHT Operation Element) the bits for the subchannel
- * we're using right now.
- */
-static u16
-ieee80211_extract_dis_subch_bmap(const struct ieee80211_eht_operation *eht_oper,
- struct cfg80211_chan_def *chandef, u16 bitmap)
-{
- struct ieee80211_eht_operation_info *info = (void *)eht_oper->optional;
- struct cfg80211_chan_def ap_chandef = *chandef;
- u32 ap_center_freq, local_center_freq;
- u32 ap_bw, local_bw;
- int ap_start_freq, local_start_freq;
- u16 shift, mask;
-
- if (!(eht_oper->params & IEEE80211_EHT_OPER_INFO_PRESENT) ||
- !(eht_oper->params &
- IEEE80211_EHT_OPER_DISABLED_SUBCHANNEL_BITMAP_PRESENT))
- return 0;
-
- /* set 160/320 supported to get the full AP definition */
- ieee80211_chandef_eht_oper((const void *)eht_oper->optional,
- true, true, &ap_chandef);
- ap_center_freq = ap_chandef.center_freq1;
- ap_bw = 20 * BIT(u8_get_bits(info->control,
- IEEE80211_EHT_OPER_CHAN_WIDTH));
- ap_start_freq = ap_center_freq - ap_bw / 2;
- local_center_freq = chandef->center_freq1;
- local_bw = 20 * BIT(ieee80211_chan_width_to_rx_bw(chandef->width));
- local_start_freq = local_center_freq - local_bw / 2;
- shift = (local_start_freq - ap_start_freq) / 20;
- mask = BIT(local_bw / 20) - 1;
-
- return (bitmap >> shift) & mask;
-}
-
-/*
- * Handle the puncturing bitmap, possibly downgrading bandwidth to get a
- * valid bitmap.
- */
-static void
-ieee80211_handle_puncturing_bitmap(struct ieee80211_link_data *link,
- const struct ieee80211_eht_operation *eht_oper,
- u16 bitmap, u64 *changed)
-{
- struct cfg80211_chan_def *chandef = &link->conf->chandef;
- struct ieee80211_local *local = link->sdata->local;
- u16 extracted;
- u64 _changed = 0;
-
- if (!changed)
- changed = &_changed;
-
- while (chandef->width > NL80211_CHAN_WIDTH_40) {
- extracted =
- ieee80211_extract_dis_subch_bmap(eht_oper, chandef,
- bitmap);
-
- if (cfg80211_valid_disable_subchannel_bitmap(&bitmap,
- chandef) &&
- !(bitmap && ieee80211_hw_check(&local->hw,
- DISALLOW_PUNCTURING)))
- break;
- link->u.mgd.conn_flags |=
- ieee80211_chandef_downgrade(chandef);
- *changed |= BSS_CHANGED_BANDWIDTH;
- }
-
- if (chandef->width <= NL80211_CHAN_WIDTH_40)
- extracted = 0;
-
- if (link->conf->eht_puncturing != extracted) {
- link->conf->eht_puncturing = extracted;
- *changed |= BSS_CHANGED_EHT_PUNCTURING;
- }
-}
-
-/*
* We can have multiple work items (and connection probing)
* scheduling this timer, but we need to take care to only
* reschedule it when it should fire _earlier_ than it was
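The removed extraction helper was mostly index arithmetic: line the AP's and the local channel's lowest 20 MHz subchannel up, then shift and mask the AP's bitmap down to the local width (the new code instead carries the bitmap in the chandef's punctured field and lets cfg80211 validate it). A standalone sketch of that arithmetic with concrete numbers:

#include <stdint.h>
#include <stdio.h>

/* standalone rendition of the shift/mask in the removed
 * ieee80211_extract_dis_subch_bmap(); frequencies are in MHz */
static uint16_t extract_local_bitmap(uint16_t ap_bitmap,
				     uint32_t ap_center, uint32_t ap_bw,
				     uint32_t local_center, uint32_t local_bw)
{
	uint32_t ap_start = ap_center - ap_bw / 2;
	uint32_t local_start = local_center - local_bw / 2;
	uint16_t shift = (local_start - ap_start) / 20;
	uint16_t mask = (1u << (local_bw / 20)) - 1;

	return (ap_bitmap >> shift) & mask;
}

int main(void)
{
	/* AP: 160 MHz centered on 6105 (spans 6025..6185),
	 * us: 80 MHz centered on 6145 (spans 6105..6185)
	 * -> take bits 4..7 of the AP's 8-bit bitmap */
	printf("0x%x\n", extract_local_bitmap(0xf0, 6105, 160, 6145, 80));
	/* prints 0xf */
	return 0;
}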
@@ -223,77 +147,84 @@ static int ecw2cw(int ecw)
return (1 << ecw) - 1;
}
-static ieee80211_conn_flags_t
-ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
- struct ieee80211_link_data *link,
- ieee80211_conn_flags_t conn_flags,
- struct ieee80211_supported_band *sband,
- struct ieee80211_channel *channel,
- u32 vht_cap_info,
- const struct ieee80211_ht_operation *ht_oper,
- const struct ieee80211_vht_operation *vht_oper,
- const struct ieee80211_he_operation *he_oper,
- const struct ieee80211_eht_operation *eht_oper,
- const struct ieee80211_s1g_oper_ie *s1g_oper,
- struct cfg80211_chan_def *chandef, bool tracking)
+static enum ieee80211_conn_mode
+ieee80211_determine_ap_chan(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_channel *channel,
+ u32 vht_cap_info,
+ const struct ieee802_11_elems *elems,
+ bool ignore_ht_channel_mismatch,
+ const struct ieee80211_conn_settings *conn,
+ struct cfg80211_chan_def *chandef)
{
+ const struct ieee80211_ht_operation *ht_oper = elems->ht_operation;
+ const struct ieee80211_vht_operation *vht_oper = elems->vht_operation;
+ const struct ieee80211_he_operation *he_oper = elems->he_operation;
+ const struct ieee80211_eht_operation *eht_oper = elems->eht_operation;
+ struct ieee80211_supported_band *sband =
+ sdata->local->hw.wiphy->bands[channel->band];
struct cfg80211_chan_def vht_chandef;
- struct ieee80211_sta_ht_cap sta_ht_cap;
- ieee80211_conn_flags_t ret;
+ bool no_vht = false;
u32 ht_cfreq;
- memset(chandef, 0, sizeof(struct cfg80211_chan_def));
- chandef->chan = channel;
- chandef->width = NL80211_CHAN_WIDTH_20_NOHT;
- chandef->center_freq1 = channel->center_freq;
- chandef->freq1_offset = channel->freq_offset;
+ *chandef = (struct cfg80211_chan_def) {
+ .chan = channel,
+ .width = NL80211_CHAN_WIDTH_20_NOHT,
+ .center_freq1 = channel->center_freq,
+ .freq1_offset = channel->freq_offset,
+ };
- if (channel->band == NL80211_BAND_6GHZ) {
- if (!ieee80211_chandef_he_6ghz_oper(sdata, he_oper, eht_oper,
- chandef)) {
- mlme_dbg(sdata,
- "bad 6 GHz operation, disabling HT/VHT/HE/EHT\n");
- ret = IEEE80211_CONN_DISABLE_HT |
- IEEE80211_CONN_DISABLE_VHT |
- IEEE80211_CONN_DISABLE_HE |
- IEEE80211_CONN_DISABLE_EHT;
- } else {
- ret = 0;
- }
- vht_chandef = *chandef;
- goto out;
- } else if (sband->band == NL80211_BAND_S1GHZ) {
- if (!ieee80211_chandef_s1g_oper(s1g_oper, chandef)) {
+ /* get special S1G case out of the way */
+ if (sband->band == NL80211_BAND_S1GHZ) {
+ if (!ieee80211_chandef_s1g_oper(elems->s1g_oper, chandef)) {
sdata_info(sdata,
"Missing S1G Operation Element? Trying operating == primary\n");
chandef->width = ieee80211_s1g_channel_width(channel);
}
- ret = IEEE80211_CONN_DISABLE_HT | IEEE80211_CONN_DISABLE_40MHZ |
- IEEE80211_CONN_DISABLE_VHT |
- IEEE80211_CONN_DISABLE_80P80MHZ |
- IEEE80211_CONN_DISABLE_160MHZ;
- goto out;
+ return IEEE80211_CONN_MODE_S1G;
}
- memcpy(&sta_ht_cap, &sband->ht_cap, sizeof(sta_ht_cap));
- ieee80211_apply_htcap_overrides(sdata, &sta_ht_cap);
+ /* get special 6 GHz case out of the way */
+ if (sband->band == NL80211_BAND_6GHZ) {
+ enum ieee80211_conn_mode mode = IEEE80211_CONN_MODE_EHT;
- if (!ht_oper || !sta_ht_cap.ht_supported) {
- mlme_dbg(sdata, "HT operation missing / HT not supported\n");
- ret = IEEE80211_CONN_DISABLE_HT |
- IEEE80211_CONN_DISABLE_VHT |
- IEEE80211_CONN_DISABLE_HE |
- IEEE80211_CONN_DISABLE_EHT;
- goto out;
+ /* this is an error */
+ if (conn->mode < IEEE80211_CONN_MODE_HE)
+ return IEEE80211_CONN_MODE_LEGACY;
+
+ if (!elems->he_6ghz_capa || !elems->he_cap) {
+ sdata_info(sdata,
+ "HE 6 GHz AP is missing HE/HE 6 GHz band capability\n");
+ return IEEE80211_CONN_MODE_LEGACY;
+ }
+
+ if (!eht_oper || !elems->eht_cap) {
+ eht_oper = NULL;
+ mode = IEEE80211_CONN_MODE_HE;
+ }
+
+ if (!ieee80211_chandef_he_6ghz_oper(sdata->local, he_oper,
+ eht_oper, chandef)) {
+ sdata_info(sdata, "bad HE/EHT 6 GHz operation\n");
+ return IEEE80211_CONN_MODE_LEGACY;
+ }
+
+ return mode;
}
+ /* now we have the progression HT, VHT, ... */
+ if (conn->mode < IEEE80211_CONN_MODE_HT)
+ return IEEE80211_CONN_MODE_LEGACY;
+
+ if (!ht_oper || !elems->ht_cap_elem)
+ return IEEE80211_CONN_MODE_LEGACY;
+
chandef->width = NL80211_CHAN_WIDTH_20;
ht_cfreq = ieee80211_channel_to_frequency(ht_oper->primary_chan,
channel->band);
/* check that channel matches the right operating channel */
- if (!tracking && channel->center_freq != ht_cfreq) {
+ if (!ignore_ht_channel_mismatch && channel->center_freq != ht_cfreq) {
/*
* It's possible that some APs are confused here;
* Netgear WNDR3700 sometimes reports 4 higher than
@@ -305,36 +236,22 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
"Wrong control channel: center-freq: %d ht-cfreq: %d ht->primary_chan: %d band: %d - Disabling HT\n",
channel->center_freq, ht_cfreq,
ht_oper->primary_chan, channel->band);
- ret = IEEE80211_CONN_DISABLE_HT |
- IEEE80211_CONN_DISABLE_VHT |
- IEEE80211_CONN_DISABLE_HE |
- IEEE80211_CONN_DISABLE_EHT;
- goto out;
+ return IEEE80211_CONN_MODE_LEGACY;
}
- /* check 40 MHz support, if we have it */
- if (sta_ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) {
- ieee80211_chandef_ht_oper(ht_oper, chandef);
- } else {
- mlme_dbg(sdata, "40 MHz not supported\n");
- /* 40 MHz (and 80 MHz) must be supported for VHT */
- ret = IEEE80211_CONN_DISABLE_VHT;
- /* also mark 40 MHz disabled */
- ret |= IEEE80211_CONN_DISABLE_40MHZ;
- goto out;
- }
+ ieee80211_chandef_ht_oper(ht_oper, chandef);
- if (!vht_oper || !sband->vht_cap.vht_supported) {
- mlme_dbg(sdata, "VHT operation missing / VHT not supported\n");
- ret = IEEE80211_CONN_DISABLE_VHT;
- goto out;
- }
+ if (conn->mode < IEEE80211_CONN_MODE_VHT)
+ return IEEE80211_CONN_MODE_HT;
vht_chandef = *chandef;
- if (!(conn_flags & IEEE80211_CONN_DISABLE_HE) &&
- he_oper &&
- (le32_to_cpu(he_oper->he_oper_params) &
- IEEE80211_HE_OPERATION_VHT_OPER_INFO)) {
+
+ /*
+ * having he_cap/he_oper parsed out implies we're at
+ * least operating as HE STA
+ */
+ if (elems->he_cap && he_oper &&
+ he_oper->he_oper_params & cpu_to_le32(IEEE80211_HE_OPERATION_VHT_OPER_INFO)) {
struct ieee80211_vht_operation he_oper_vht_cap;
/*
@@ -347,253 +264,614 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
if (!ieee80211_chandef_vht_oper(&sdata->local->hw, vht_cap_info,
&he_oper_vht_cap, ht_oper,
&vht_chandef)) {
- if (!(conn_flags & IEEE80211_CONN_DISABLE_HE))
- sdata_info(sdata,
- "HE AP VHT information is invalid, disabling HE\n");
- ret = IEEE80211_CONN_DISABLE_HE | IEEE80211_CONN_DISABLE_EHT;
- goto out;
+ sdata_info(sdata,
+ "HE AP VHT information is invalid, disabling HE\n");
+ /* this will cause us to re-parse as VHT STA */
+ return IEEE80211_CONN_MODE_VHT;
}
+ } else if (!vht_oper || !elems->vht_cap_elem) {
+ if (sband->band == NL80211_BAND_5GHZ) {
+ sdata_info(sdata,
+ "VHT information is missing, disabling VHT\n");
+ return IEEE80211_CONN_MODE_HT;
+ }
+ no_vht = true;
+ } else if (sband->band == NL80211_BAND_2GHZ) {
+ no_vht = true;
} else if (!ieee80211_chandef_vht_oper(&sdata->local->hw,
vht_cap_info,
vht_oper, ht_oper,
&vht_chandef)) {
- if (!(conn_flags & IEEE80211_CONN_DISABLE_VHT))
- sdata_info(sdata,
- "AP VHT information is invalid, disabling VHT\n");
- ret = IEEE80211_CONN_DISABLE_VHT;
- goto out;
+ sdata_info(sdata,
+ "AP VHT information is invalid, disabling VHT\n");
+ return IEEE80211_CONN_MODE_HT;
}
- if (!cfg80211_chandef_valid(&vht_chandef)) {
- if (!(conn_flags & IEEE80211_CONN_DISABLE_VHT))
- sdata_info(sdata,
- "AP VHT information is invalid, disabling VHT\n");
- ret = IEEE80211_CONN_DISABLE_VHT;
- goto out;
+ if (!cfg80211_chandef_compatible(chandef, &vht_chandef)) {
+ sdata_info(sdata,
+ "AP VHT information doesn't match HT, disabling VHT\n");
+ return IEEE80211_CONN_MODE_HT;
}
- if (cfg80211_chandef_identical(chandef, &vht_chandef)) {
- ret = 0;
- goto out;
- }
+ *chandef = vht_chandef;
- if (!cfg80211_chandef_compatible(chandef, &vht_chandef)) {
- if (!(conn_flags & IEEE80211_CONN_DISABLE_VHT))
- sdata_info(sdata,
- "AP VHT information doesn't match HT, disabling VHT\n");
- ret = IEEE80211_CONN_DISABLE_VHT;
- goto out;
+ /* stick to current max mode if we or the AP don't have HE */
+ if (conn->mode < IEEE80211_CONN_MODE_HE ||
+ !elems->he_operation || !elems->he_cap) {
+ if (no_vht)
+ return IEEE80211_CONN_MODE_HT;
+ return IEEE80211_CONN_MODE_VHT;
}
- *chandef = vht_chandef;
+ /* stick to HE if we or the AP don't have EHT */
+ if (conn->mode < IEEE80211_CONN_MODE_EHT ||
+ !eht_oper || !elems->eht_cap)
+ return IEEE80211_CONN_MODE_HE;
/*
* handle the case that the EHT operation indicates that it holds EHT
* operation information (in case that the channel width differs from
* the channel width reported in HT/VHT/HE).
*/
- if (eht_oper && (eht_oper->params & IEEE80211_EHT_OPER_INFO_PRESENT)) {
+ if (eht_oper->params & IEEE80211_EHT_OPER_INFO_PRESENT) {
struct cfg80211_chan_def eht_chandef = *chandef;
ieee80211_chandef_eht_oper((const void *)eht_oper->optional,
- eht_chandef.width ==
- NL80211_CHAN_WIDTH_160,
- false, &eht_chandef);
+ &eht_chandef);
+
+ eht_chandef.punctured =
+ ieee80211_eht_oper_dis_subchan_bitmap(eht_oper);
if (!cfg80211_chandef_valid(&eht_chandef)) {
- if (!(conn_flags & IEEE80211_CONN_DISABLE_EHT))
- sdata_info(sdata,
- "AP EHT information is invalid, disabling EHT\n");
- ret = IEEE80211_CONN_DISABLE_EHT;
- goto out;
+ sdata_info(sdata,
+ "AP EHT information is invalid, disabling EHT\n");
+ return IEEE80211_CONN_MODE_HE;
}
if (!cfg80211_chandef_compatible(chandef, &eht_chandef)) {
- if (!(conn_flags & IEEE80211_CONN_DISABLE_EHT))
- sdata_info(sdata,
- "AP EHT information is incompatible, disabling EHT\n");
- ret = IEEE80211_CONN_DISABLE_EHT;
- goto out;
+ sdata_info(sdata,
+ "AP EHT information doesn't match HT/VHT/HE, disabling EHT\n");
+ return IEEE80211_CONN_MODE_HE;
}
*chandef = eht_chandef;
}
- ret = 0;
+ return IEEE80211_CONN_MODE_EHT;
+}
+
+static bool
+ieee80211_verify_peer_he_mcs_support(struct ieee80211_sub_if_data *sdata,
+ const struct ieee80211_he_cap_elem *he_cap,
+ const struct ieee80211_he_operation *he_op)
+{
+ struct ieee80211_he_mcs_nss_supp *he_mcs_nss_supp;
+ u16 mcs_80_map_tx, mcs_80_map_rx;
+ u16 ap_min_req_set;
+ int nss;
+
+ if (!he_cap)
+ return false;
+
+ /* mcs_nss is right after he_cap info */
+ he_mcs_nss_supp = (void *)(he_cap + 1);
+
+ mcs_80_map_tx = le16_to_cpu(he_mcs_nss_supp->tx_mcs_80);
+ mcs_80_map_rx = le16_to_cpu(he_mcs_nss_supp->rx_mcs_80);
+
+ /* P802.11-REVme/D0.3
+ * 27.1.1 Introduction to the HE PHY
+ * ...
+ * An HE STA shall support the following features:
+ * ...
+ * Single spatial stream HE-MCSs 0 to 7 (transmit and receive) in all
+ * supported channel widths for HE SU PPDUs
+ */
+ if ((mcs_80_map_tx & 0x3) == IEEE80211_HE_MCS_NOT_SUPPORTED ||
+ (mcs_80_map_rx & 0x3) == IEEE80211_HE_MCS_NOT_SUPPORTED) {
+ sdata_info(sdata,
+ "Missing mandatory rates for 1 Nss, rx 0x%x, tx 0x%x, disable HE\n",
+ mcs_80_map_tx, mcs_80_map_rx);
+ return false;
+ }
+
+ if (!he_op)
+ return true;
+
+ ap_min_req_set = le16_to_cpu(he_op->he_mcs_nss_set);
-out:
/*
- * When tracking the current AP, don't do any further checks if the
- * new chandef is identical to the one we're currently using for the
- * connection. This keeps us from playing ping-pong with regulatory,
- * without it the following can happen (for example):
- * - connect to an AP with 80 MHz, world regdom allows 80 MHz
- * - AP advertises regdom US
- * - CRDA loads regdom US with 80 MHz prohibited (old database)
- * - the code below detects an unsupported channel, downgrades, and
- * we disconnect from the AP in the caller
- * - disconnect causes CRDA to reload world regdomain and the game
- * starts anew.
- * (see https://bugzilla.kernel.org/show_bug.cgi?id=70881)
+ * Apparently iPhone 13 (at least iOS version 15.3.1) sets this to all
+ * zeroes, which is nonsense, and completely inconsistent with itself
+ * (it doesn't have 8 streams). Accept the settings in this case anyway.
+ */
+ if (!ap_min_req_set)
+ return true;
+
+ /* make sure the AP is consistent with itself
*
- * It seems possible that there are still scenarios with CSA or real
- * bandwidth changes where a this could happen, but those cases are
- * less common and wouldn't completely prevent using the AP.
+ * P802.11-REVme/D0.3
+ * 26.17.1 Basic HE BSS operation
+ *
+ * A STA that is operating in an HE BSS shall be able to receive and
+ * transmit at each of the <HE-MCS, NSS> tuple values indicated by the
+ * Basic HE-MCS And NSS Set field of the HE Operation parameter of the
+ * MLME-START.request primitive and shall be able to receive at each of
+ * the <HE-MCS, NSS> tuple values indicated by the Supported HE-MCS and
+ * NSS Set field in the HE Capabilities parameter of the MLME-START.request
+ * primitive
*/
- if (tracking &&
- cfg80211_chandef_identical(chandef, &link->conf->chandef))
- return ret;
+ for (nss = 8; nss > 0; nss--) {
+ u8 ap_op_val = (ap_min_req_set >> (2 * (nss - 1))) & 3;
+ u8 ap_rx_val;
+ u8 ap_tx_val;
+
+ if (ap_op_val == IEEE80211_HE_MCS_NOT_SUPPORTED)
+ continue;
- /* don't print the message below for VHT mismatch if VHT is disabled */
- if (ret & IEEE80211_CONN_DISABLE_VHT)
- vht_chandef = *chandef;
+ ap_rx_val = (mcs_80_map_rx >> (2 * (nss - 1))) & 3;
+ ap_tx_val = (mcs_80_map_tx >> (2 * (nss - 1))) & 3;
+
+ if (ap_rx_val == IEEE80211_HE_MCS_NOT_SUPPORTED ||
+ ap_tx_val == IEEE80211_HE_MCS_NOT_SUPPORTED ||
+ ap_rx_val < ap_op_val || ap_tx_val < ap_op_val) {
+ sdata_info(sdata,
+ "Invalid rates for %d Nss, rx %d, tx %d oper %d, disable HE\n",
+ nss, ap_rx_val, ap_tx_val, ap_op_val);
+ return false;
+ }
+ }
+
+ return true;
+}
+
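Both MCS checks above decode the same packed format: a 16-bit map with two bits per spatial stream (NSS 1-8), where 0, 1 and 2 mean support up to MCS 7, 9 and 11 respectively, and 3 means that stream count is unsupported. A minimal decode sketch:

#include <stdint.h>
#include <stdio.h>

/* two bits per NSS, like the he_mcs_nss_supp maps checked above */
static unsigned int he_mcs_for_nss(uint16_t mcs_map, unsigned int nss)
{
	return (mcs_map >> (2 * (nss - 1))) & 0x3;	/* nss is 1..8 */
}

int main(void)
{
	/* NSS 1: MCS 0-11, NSS 2: MCS 0-9, NSS 3+: unsupported */
	uint16_t map = 0xfff6;

	printf("nss1=%u nss2=%u nss3=%u\n",
	       he_mcs_for_nss(map, 1), he_mcs_for_nss(map, 2),
	       he_mcs_for_nss(map, 3));	/* prints 2 1 3 */
	return 0;
}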
+static bool
+ieee80211_verify_sta_he_mcs_support(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_supported_band *sband,
+ const struct ieee80211_he_operation *he_op)
+{
+ const struct ieee80211_sta_he_cap *sta_he_cap =
+ ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif);
+ u16 ap_min_req_set;
+ int i;
+
+ if (!sta_he_cap || !he_op)
+ return false;
+
+ ap_min_req_set = le16_to_cpu(he_op->he_mcs_nss_set);
/*
- * Ignore the DISABLED flag when we're already connected and only
- * tracking the APs beacon for bandwidth changes - otherwise we
- * might get disconnected here if we connect to an AP, update our
- * regulatory information based on the AP's country IE and the
- * information we have is wrong/outdated and disables the channel
- * that we're actually using for the connection to the AP.
+ * Apparently iPhone 13 (at least iOS version 15.3.1) sets this to all
+ * zeroes, which is nonsense, and completely inconsistent with itself
+ * (it doesn't have 8 streams). Accept the settings in this case anyway.
*/
- while (!cfg80211_chandef_usable(sdata->local->hw.wiphy, chandef,
- tracking ? 0 :
- IEEE80211_CHAN_DISABLED)) {
- if (WARN_ON(chandef->width == NL80211_CHAN_WIDTH_20_NOHT)) {
- ret = IEEE80211_CONN_DISABLE_HT |
- IEEE80211_CONN_DISABLE_VHT |
- IEEE80211_CONN_DISABLE_HE |
- IEEE80211_CONN_DISABLE_EHT;
- break;
+ if (!ap_min_req_set)
+ return true;
+
+ /* Need to go over for 80MHz, 160MHz and for 80+80 */
+ for (i = 0; i < 3; i++) {
+ const struct ieee80211_he_mcs_nss_supp *sta_mcs_nss_supp =
+ &sta_he_cap->he_mcs_nss_supp;
+ u16 sta_mcs_map_rx =
+ le16_to_cpu(((__le16 *)sta_mcs_nss_supp)[2 * i]);
+ u16 sta_mcs_map_tx =
+ le16_to_cpu(((__le16 *)sta_mcs_nss_supp)[2 * i + 1]);
+ u8 nss;
+ bool verified = true;
+
+ /*
+ * For each band there is a maximum of 8 spatial streams
+ * possible. Each of the sta_mcs_map_* is a 16-bit struct built
+ * of 2 bits per NSS (1-8), with the values defined in enum
+ * ieee80211_he_mcs_support. Need to make sure STA TX and RX
+ * capabilities aren't less than the AP's minimum requirements
+ * for this HE BSS per SS.
+ * It is enough to find one such band that meets the reqs.
+ */
+ for (nss = 8; nss > 0; nss--) {
+ u8 sta_rx_val = (sta_mcs_map_rx >> (2 * (nss - 1))) & 3;
+ u8 sta_tx_val = (sta_mcs_map_tx >> (2 * (nss - 1))) & 3;
+ u8 ap_val = (ap_min_req_set >> (2 * (nss - 1))) & 3;
+
+ if (ap_val == IEEE80211_HE_MCS_NOT_SUPPORTED)
+ continue;
+
+ /*
+ * Make sure the HE AP doesn't require MCSs that aren't
+ * supported by the client as required by spec
+ *
+ * P802.11-REVme/D0.3
+ * 26.17.1 Basic HE BSS operation
+ *
+ * An HE STA shall not attempt to join (MLME-JOIN.request primitive)
+ * a BSS, unless it supports (i.e., is able to both transmit and
+ * receive using) all of the <HE-MCS, NSS> tuples in the basic
+ * HE-MCS and NSS set.
+ */
+ if (sta_rx_val == IEEE80211_HE_MCS_NOT_SUPPORTED ||
+ sta_tx_val == IEEE80211_HE_MCS_NOT_SUPPORTED ||
+ (ap_val > sta_rx_val) || (ap_val > sta_tx_val)) {
+ verified = false;
+ break;
+ }
}
- ret |= ieee80211_chandef_downgrade(chandef);
+ if (verified)
+ return true;
+ }
+
+ /* If here, STA doesn't meet AP's HE min requirements */
+ return false;
+}
+
+static u8
+ieee80211_get_eht_cap_mcs_nss(const struct ieee80211_sta_he_cap *sta_he_cap,
+ const struct ieee80211_sta_eht_cap *sta_eht_cap,
+ unsigned int idx, int bw)
+{
+ u8 he_phy_cap0 = sta_he_cap->he_cap_elem.phy_cap_info[0];
+ u8 eht_phy_cap0 = sta_eht_cap->eht_cap_elem.phy_cap_info[0];
+
+ /* handle us being a 20 MHz-only EHT STA - with four values
+ * for MCS 0-7, 8-9, 10-11, 12-13.
+ */
+ if (!(he_phy_cap0 & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK_ALL))
+ return sta_eht_cap->eht_mcs_nss_supp.only_20mhz.rx_tx_max_nss[idx];
+
+ /* the others have MCS 0-9 together, rather than separately from 0-7 */
+ if (idx > 0)
+ idx--;
+
+ switch (bw) {
+ case 0:
+ return sta_eht_cap->eht_mcs_nss_supp.bw._80.rx_tx_max_nss[idx];
+ case 1:
+ if (!(he_phy_cap0 &
+ (IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G |
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)))
+ return 0xff; /* pass check */
+ return sta_eht_cap->eht_mcs_nss_supp.bw._160.rx_tx_max_nss[idx];
+ case 2:
+ if (!(eht_phy_cap0 & IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ))
+ return 0xff; /* pass check */
+ return sta_eht_cap->eht_mcs_nss_supp.bw._320.rx_tx_max_nss[idx];
}
- if (!he_oper || !cfg80211_chandef_usable(sdata->wdev.wiphy, chandef,
- IEEE80211_CHAN_NO_HE))
- ret |= IEEE80211_CONN_DISABLE_HE | IEEE80211_CONN_DISABLE_EHT;
+ WARN_ON(1);
+ return 0;
+}
- if (!eht_oper || !cfg80211_chandef_usable(sdata->wdev.wiphy, chandef,
- IEEE80211_CHAN_NO_EHT))
- ret |= IEEE80211_CONN_DISABLE_EHT;
+static bool
+ieee80211_verify_sta_eht_mcs_support(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_supported_band *sband,
+ const struct ieee80211_eht_operation *eht_op)
+{
+ const struct ieee80211_sta_he_cap *sta_he_cap =
+ ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif);
+ const struct ieee80211_sta_eht_cap *sta_eht_cap =
+ ieee80211_get_eht_iftype_cap_vif(sband, &sdata->vif);
+ const struct ieee80211_eht_mcs_nss_supp_20mhz_only *req;
+ unsigned int i;
- if (chandef->width != vht_chandef.width && !tracking)
+ if (!sta_he_cap || !sta_eht_cap || !eht_op)
+ return false;
+
+ req = &eht_op->basic_mcs_nss;
+
+ for (i = 0; i < ARRAY_SIZE(req->rx_tx_max_nss); i++) {
+ u8 req_rx_nss, req_tx_nss;
+ unsigned int bw;
+
+ req_rx_nss = u8_get_bits(req->rx_tx_max_nss[i],
+ IEEE80211_EHT_MCS_NSS_RX);
+ req_tx_nss = u8_get_bits(req->rx_tx_max_nss[i],
+ IEEE80211_EHT_MCS_NSS_TX);
+
+ for (bw = 0; bw < 3; bw++) {
+ u8 have, have_rx_nss, have_tx_nss;
+
+ have = ieee80211_get_eht_cap_mcs_nss(sta_he_cap,
+ sta_eht_cap,
+ i, bw);
+ have_rx_nss = u8_get_bits(have,
+ IEEE80211_EHT_MCS_NSS_RX);
+ have_tx_nss = u8_get_bits(have,
+ IEEE80211_EHT_MCS_NSS_TX);
+
+ if (req_rx_nss > have_rx_nss ||
+ req_tx_nss > have_tx_nss)
+ return false;
+ }
+ }
+
+ return true;
+}
+
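Each rx_tx_max_nss byte compared in the loop above packs the maximum RX NSS into the low nibble and the TX NSS into the high nibble (the IEEE80211_EHT_MCS_NSS_RX/TX fields extracted via u8_get_bits()). A small sketch of the per-byte comparison with the masks written out:

#include <stdint.h>
#include <stdio.h>

/* RX NSS in the low nibble, TX NSS in the high nibble */
static int eht_nss_ok(uint8_t required, uint8_t supported)
{
	uint8_t req_rx = required & 0x0f, req_tx = (required & 0xf0) >> 4;
	uint8_t have_rx = supported & 0x0f, have_tx = (supported & 0xf0) >> 4;

	return req_rx <= have_rx && req_tx <= have_tx;
}

int main(void)
{
	/* AP requires 2x2 (0x22); we support 4x4 (0x44) -> OK */
	printf("%d\n", eht_nss_ok(0x22, 0x44));	/* prints 1 */
	return 0;
}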
+static bool ieee80211_chandef_usable(struct ieee80211_sub_if_data *sdata,
+ const struct cfg80211_chan_def *chandef,
+ u32 prohibited_flags)
+{
+ if (!cfg80211_chandef_usable(sdata->local->hw.wiphy,
+ chandef, prohibited_flags))
+ return false;
+
+ if (chandef->punctured &&
+ ieee80211_hw_check(&sdata->local->hw, DISALLOW_PUNCTURING))
+ return false;
+
+ return true;
+}
+
+static struct ieee802_11_elems *
+ieee80211_determine_chan_mode(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_conn_settings *conn,
+ struct cfg80211_bss *cbss, int link_id,
+ struct ieee80211_chan_req *chanreq)
+{
+ const struct cfg80211_bss_ies *ies = rcu_dereference(cbss->ies);
+ struct ieee80211_bss *bss = (void *)cbss->priv;
+ struct ieee80211_channel *channel = cbss->channel;
+ struct ieee80211_elems_parse_params parse_params = {
+ .link_id = -1,
+ .from_ap = true,
+ .start = ies->data,
+ .len = ies->len,
+ .mode = conn->mode,
+ };
+ struct ieee802_11_elems *elems;
+ struct ieee80211_supported_band *sband;
+ struct cfg80211_chan_def ap_chandef;
+ enum ieee80211_conn_mode ap_mode;
+ int ret;
+
+again:
+ elems = ieee802_11_parse_elems_full(&parse_params);
+ if (!elems)
+ return ERR_PTR(-ENOMEM);
+
+ ap_mode = ieee80211_determine_ap_chan(sdata, channel, bss->vht_cap_info,
+ elems, false, conn, &ap_chandef);
+
+ mlme_link_id_dbg(sdata, link_id, "determined AP %pM to be %s\n",
+ cbss->bssid, ieee80211_conn_mode_str(ap_mode));
+
+ /* this should be impossible since parsing depends on our mode */
+ if (WARN_ON(ap_mode > conn->mode)) {
+ ret = -EINVAL;
+ goto free;
+ }
+
+ sband = sdata->local->hw.wiphy->bands[channel->band];
+
+ switch (channel->band) {
+ case NL80211_BAND_S1GHZ:
+ if (WARN_ON(ap_mode != IEEE80211_CONN_MODE_S1G)) {
+ ret = -EINVAL;
+ goto free;
+ }
+ return elems;
+ case NL80211_BAND_6GHZ:
+ if (ap_mode < IEEE80211_CONN_MODE_HE) {
+ sdata_info(sdata,
+ "Rejecting non-HE 6/7 GHz connection");
+ ret = -EINVAL;
+ goto free;
+ }
+ break;
+ default:
+ if (WARN_ON(ap_mode == IEEE80211_CONN_MODE_S1G)) {
+ ret = -EINVAL;
+ goto free;
+ }
+ }
+
+ switch (ap_mode) {
+ case IEEE80211_CONN_MODE_S1G:
+ WARN_ON(1);
+ ret = -EINVAL;
+ goto free;
+ case IEEE80211_CONN_MODE_LEGACY:
+ conn->bw_limit = IEEE80211_CONN_BW_LIMIT_20;
+ break;
+ case IEEE80211_CONN_MODE_HT:
+ conn->bw_limit = min_t(enum ieee80211_conn_bw_limit,
+ conn->bw_limit,
+ IEEE80211_CONN_BW_LIMIT_40);
+ break;
+ case IEEE80211_CONN_MODE_VHT:
+ case IEEE80211_CONN_MODE_HE:
+ conn->bw_limit = min_t(enum ieee80211_conn_bw_limit,
+ conn->bw_limit,
+ IEEE80211_CONN_BW_LIMIT_160);
+ break;
+ case IEEE80211_CONN_MODE_EHT:
+ conn->bw_limit = min_t(enum ieee80211_conn_bw_limit,
+ conn->bw_limit,
+ IEEE80211_CONN_BW_LIMIT_320);
+ break;
+ }
+
+ conn->mode = ap_mode;
+ chanreq->oper = ap_chandef;
+
+ /* wider-bandwidth OFDMA is only done in EHT */
+ if (conn->mode >= IEEE80211_CONN_MODE_EHT &&
+ !(sdata->vif.driver_flags & IEEE80211_VIF_IGNORE_OFDMA_WIDER_BW))
+ chanreq->ap = ap_chandef;
+ else
+ chanreq->ap.chan = NULL;
+
+ while (!ieee80211_chandef_usable(sdata, &chanreq->oper,
+ IEEE80211_CHAN_DISABLED)) {
+ if (WARN_ON(chanreq->oper.width == NL80211_CHAN_WIDTH_20_NOHT)) {
+ ret = -EINVAL;
+ goto free;
+ }
+
+ ieee80211_chanreq_downgrade(chanreq, conn);
+ }
+
+ if (conn->mode >= IEEE80211_CONN_MODE_HE &&
+ !cfg80211_chandef_usable(sdata->wdev.wiphy, &chanreq->oper,
+ IEEE80211_CHAN_NO_HE)) {
+ conn->mode = IEEE80211_CONN_MODE_VHT;
+ conn->bw_limit = min_t(enum ieee80211_conn_bw_limit,
+ conn->bw_limit,
+ IEEE80211_CONN_BW_LIMIT_160);
+ }
+
+ if (conn->mode >= IEEE80211_CONN_MODE_EHT &&
+ !cfg80211_chandef_usable(sdata->wdev.wiphy, &chanreq->oper,
+ IEEE80211_CHAN_NO_EHT)) {
+ conn->mode = IEEE80211_CONN_MODE_HE;
+ conn->bw_limit = min_t(enum ieee80211_conn_bw_limit,
+ conn->bw_limit,
+ IEEE80211_CONN_BW_LIMIT_160);
+ }
+
+ if (chanreq->oper.width != ap_chandef.width || ap_mode != conn->mode)
sdata_info(sdata,
- "capabilities/regulatory prevented using AP HT/VHT configuration, downgraded\n");
+ "regulatory prevented using AP config, downgraded\n");
- WARN_ON_ONCE(!cfg80211_chandef_valid(chandef));
- return ret;
+ if (conn->mode >= IEEE80211_CONN_MODE_HE &&
+ (!ieee80211_verify_peer_he_mcs_support(sdata, (void *)elems->he_cap,
+ elems->he_operation) ||
+ !ieee80211_verify_sta_he_mcs_support(sdata, sband,
+ elems->he_operation))) {
+ conn->mode = IEEE80211_CONN_MODE_VHT;
+ sdata_info(sdata, "required MCSes not supported, disabling HE\n");
+ }
+
+ if (conn->mode >= IEEE80211_CONN_MODE_EHT &&
+ !ieee80211_verify_sta_eht_mcs_support(sdata, sband,
+ elems->eht_operation)) {
+ conn->mode = IEEE80211_CONN_MODE_HE;
+ conn->bw_limit = min_t(enum ieee80211_conn_bw_limit,
+ conn->bw_limit,
+ IEEE80211_CONN_BW_LIMIT_160);
+ sdata_info(sdata, "required MCSes not supported, disabling EHT\n");
+ }
+
+ /* the mode can only decrease, so this must terminate */
+ if (ap_mode != conn->mode)
+ goto again;
+
+ mlme_link_id_dbg(sdata, link_id,
+ "connecting with %s mode, max bandwidth %d MHz\n",
+ ieee80211_conn_mode_str(conn->mode),
+ 20 * (1 << conn->bw_limit));
+
+ if (WARN_ON_ONCE(!cfg80211_chandef_valid(&chanreq->oper))) {
+ ret = -EINVAL;
+ goto free;
+ }
+
+ return elems;
+free:
+ kfree(elems);
+ return ERR_PTR(ret);
}
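The function leans on a convergence argument for its "goto again": element parsing depends on conn->mode, every verification step can only lower the mode, and the mode space is finite, so re-parsing settles after at most a handful of passes. A toy sketch of the same shape (verify() here is a hypothetical stand-in for the MCS and channel checks):

#include <stdio.h>

enum mode { LEGACY, HT, VHT, HE, EHT };

/* toy stand-in: pretend EHT and HE both fail verification,
 * so the mode steps down twice before settling */
static enum mode verify(enum mode m)
{
	if (m == EHT)
		return HE;
	if (m == HE)
		return VHT;
	return m;
}

/* each pass may only lower the mode, so this must terminate */
static enum mode settle(enum mode wanted)
{
	enum mode cur = wanted, got;

	while ((got = verify(cur)) != cur)
		cur = got;
	return cur;
}

int main(void)
{
	printf("%d\n", settle(EHT));	/* prints 2 (VHT) */
	return 0;
}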
static int ieee80211_config_bw(struct ieee80211_link_data *link,
- const struct ieee80211_ht_cap *ht_cap,
- const struct ieee80211_vht_cap *vht_cap,
- const struct ieee80211_ht_operation *ht_oper,
- const struct ieee80211_vht_operation *vht_oper,
- const struct ieee80211_he_operation *he_oper,
- const struct ieee80211_eht_operation *eht_oper,
- const struct ieee80211_s1g_oper_ie *s1g_oper,
- const u8 *bssid, u64 *changed)
+ struct ieee802_11_elems *elems,
+ bool update, u64 *changed)
{
+ struct ieee80211_channel *channel = link->conf->chanreq.oper.chan;
struct ieee80211_sub_if_data *sdata = link->sdata;
- struct ieee80211_local *local = sdata->local;
- struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
- struct ieee80211_channel *chan = link->conf->chandef.chan;
- struct ieee80211_supported_band *sband =
- local->hw.wiphy->bands[chan->band];
- struct cfg80211_chan_def chandef;
- u16 ht_opmode;
- ieee80211_conn_flags_t flags;
+ struct ieee80211_chan_req chanreq = {};
+ enum ieee80211_conn_mode ap_mode;
u32 vht_cap_info = 0;
+ u16 ht_opmode;
int ret;
- /* if HT was/is disabled, don't track any bandwidth changes */
- if (link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HT || !ht_oper)
+ /* don't track any bandwidth changes in legacy/S1G modes */
+ if (link->u.mgd.conn.mode == IEEE80211_CONN_MODE_LEGACY ||
+ link->u.mgd.conn.mode == IEEE80211_CONN_MODE_S1G)
return 0;
- /* don't check VHT if we associated as non-VHT station */
- if (link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_VHT)
- vht_oper = NULL;
+ if (elems->vht_cap_elem)
+ vht_cap_info = le32_to_cpu(elems->vht_cap_elem->vht_cap_info);
- /* don't check HE if we associated as non-HE station */
- if (link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HE ||
- !ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif)) {
- he_oper = NULL;
- eht_oper = NULL;
+ ap_mode = ieee80211_determine_ap_chan(sdata, channel, vht_cap_info,
+ elems, true, &link->u.mgd.conn,
+ &chanreq.ap);
+
+ if (ap_mode != link->u.mgd.conn.mode) {
+ link_info(link,
+ "AP appears to change mode (expected %s, found %s), disconnect\n",
+ ieee80211_conn_mode_str(link->u.mgd.conn.mode),
+ ieee80211_conn_mode_str(ap_mode));
+ return -EINVAL;
}
- /* don't check EHT if we associated as non-EHT station */
- if (link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_EHT ||
- !ieee80211_get_eht_iftype_cap_vif(sband, &sdata->vif))
- eht_oper = NULL;
+ chanreq.oper = chanreq.ap;
+ if (link->u.mgd.conn.mode < IEEE80211_CONN_MODE_EHT ||
+ sdata->vif.driver_flags & IEEE80211_VIF_IGNORE_OFDMA_WIDER_BW)
+ chanreq.ap.chan = NULL;
/*
- * if bss configuration changed store the new one -
+ * if HT operation mode changed store the new one -
* this may be applicable even if channel is identical
*/
- ht_opmode = le16_to_cpu(ht_oper->operation_mode);
- if (link->conf->ht_operation_mode != ht_opmode) {
- *changed |= BSS_CHANGED_HT;
- link->conf->ht_operation_mode = ht_opmode;
+ if (elems->ht_operation) {
+ ht_opmode = le16_to_cpu(elems->ht_operation->operation_mode);
+ if (link->conf->ht_operation_mode != ht_opmode) {
+ *changed |= BSS_CHANGED_HT;
+ link->conf->ht_operation_mode = ht_opmode;
+ }
}
- if (vht_cap)
- vht_cap_info = le32_to_cpu(vht_cap->vht_cap_info);
-
- /* calculate new channel (type) based on HT/VHT/HE operation IEs */
- flags = ieee80211_determine_chantype(sdata, link,
- link->u.mgd.conn_flags,
- sband, chan, vht_cap_info,
- ht_oper, vht_oper,
- he_oper, eht_oper,
- s1g_oper, &chandef, true);
-
/*
* Downgrade the new channel if we associated with restricted
- * capabilities. For example, if we associated as a 20 MHz STA
- * to a 40 MHz AP (due to regulatory, capabilities or config
- * reasons) then switching to a 40 MHz channel now won't do us
- * any good -- we couldn't use it with the AP.
+ * bandwidth capabilities. For example, if we associated as a
+ * 20 MHz STA to a 40 MHz AP (due to regulatory, capabilities
+ * or config reasons) then switching to a 40 MHz channel now
+ * won't do us any good -- we couldn't use it with the AP.
*/
- if (link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_80P80MHZ &&
- chandef.width == NL80211_CHAN_WIDTH_80P80)
- flags |= ieee80211_chandef_downgrade(&chandef);
- if (link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_160MHZ &&
- chandef.width == NL80211_CHAN_WIDTH_160)
- flags |= ieee80211_chandef_downgrade(&chandef);
- if (link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_40MHZ &&
- chandef.width > NL80211_CHAN_WIDTH_20)
- flags |= ieee80211_chandef_downgrade(&chandef);
-
- if (cfg80211_chandef_identical(&chandef, &link->conf->chandef))
+ while (link->u.mgd.conn.bw_limit <
+ ieee80211_min_bw_limit_from_chandef(&chanreq.oper))
+ ieee80211_chandef_downgrade(&chanreq.oper, NULL);
+
+ if (ieee80211_chanreq_identical(&chanreq, &link->conf->chanreq))
return 0;
link_info(link,
- "AP %pM changed bandwidth, new config is %d.%03d MHz, width %d (%d.%03d/%d MHz)\n",
- link->u.mgd.bssid, chandef.chan->center_freq,
- chandef.chan->freq_offset, chandef.width,
- chandef.center_freq1, chandef.freq1_offset,
- chandef.center_freq2);
-
- if (flags != (link->u.mgd.conn_flags &
- (IEEE80211_CONN_DISABLE_HT |
- IEEE80211_CONN_DISABLE_VHT |
- IEEE80211_CONN_DISABLE_HE |
- IEEE80211_CONN_DISABLE_EHT |
- IEEE80211_CONN_DISABLE_40MHZ |
- IEEE80211_CONN_DISABLE_80P80MHZ |
- IEEE80211_CONN_DISABLE_160MHZ |
- IEEE80211_CONN_DISABLE_320MHZ)) ||
- !cfg80211_chandef_valid(&chandef)) {
+ "AP %pM changed bandwidth, new used config is %d.%03d MHz, width %d (%d.%03d/%d MHz)\n",
+ link->u.mgd.bssid, chanreq.oper.chan->center_freq,
+ chanreq.oper.chan->freq_offset, chanreq.oper.width,
+ chanreq.oper.center_freq1, chanreq.oper.freq1_offset,
+ chanreq.oper.center_freq2);
+
+ if (!cfg80211_chandef_valid(&chanreq.oper)) {
sdata_info(sdata,
- "AP %pM changed caps/bw in a way we can't support (0x%x/0x%x) - disconnect\n",
- link->u.mgd.bssid, flags, ifmgd->flags);
+ "AP %pM changed caps/bw in a way we can't support - disconnect\n",
+ link->u.mgd.bssid);
return -EINVAL;
}
- ret = ieee80211_link_change_bandwidth(link, &chandef, changed);
+ if (!update) {
+ link->conf->chanreq = chanreq;
+ return 0;
+ }
+ /*
+ * We're tracking the current AP here, so don't do any further checks
+ * here. This keeps us from playing ping-pong with regulatory, without
+ * it the following can happen (for example):
+ * - connect to an AP with 80 MHz, world regdom allows 80 MHz
+ * - AP advertises regdom US
+ * - CRDA loads regdom US with 80 MHz prohibited (old database)
+ * - we detect an unsupported channel and disconnect
+ * - disconnect causes CRDA to reload world regdomain and the game
+ * starts anew.
+ * (see https://bugzilla.kernel.org/show_bug.cgi?id=70881)
+ *
+ * It seems possible that there are still scenarios with CSA or real
+ * bandwidth changes where this could happen, but those cases are
+ * less common and wouldn't completely prevent using the AP.
+ */
+
+ ret = ieee80211_link_change_chanreq(link, &chanreq, changed);
if (ret) {
sdata_info(sdata,
"AP %pM changed bandwidth to incompatible one - disconnect\n",
@@ -612,7 +890,7 @@ static void ieee80211_add_ht_ie(struct ieee80211_sub_if_data *sdata,
struct ieee80211_supported_band *sband,
struct ieee80211_channel *channel,
enum ieee80211_smps_mode smps,
- ieee80211_conn_flags_t conn_flags)
+ const struct ieee80211_conn_settings *conn)
{
u8 *pos;
u32 flags = channel->flags;
@@ -647,7 +925,7 @@ static void ieee80211_add_ht_ie(struct ieee80211_sub_if_data *sdata,
* capable of 40 MHz -- some broken APs will never fall
* back to trying to transmit in 20 MHz.
*/
- if (conn_flags & IEEE80211_CONN_DISABLE_40MHZ) {
+ if (conn->bw_limit <= IEEE80211_CONN_BW_LIMIT_20) {
cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
cap &= ~IEEE80211_HT_CAP_SGI_40;
}
@@ -686,7 +964,7 @@ static bool ieee80211_add_vht_ie(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb,
struct ieee80211_supported_band *sband,
struct ieee80211_vht_cap *ap_vht_cap,
- ieee80211_conn_flags_t conn_flags)
+ const struct ieee80211_conn_settings *conn)
{
struct ieee80211_local *local = sdata->local;
u8 *pos;
@@ -703,16 +981,7 @@ static bool ieee80211_add_vht_ie(struct ieee80211_sub_if_data *sdata,
/* determine capability flags */
cap = vht_cap.cap;
- if (conn_flags & IEEE80211_CONN_DISABLE_80P80MHZ) {
- u32 bw = cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK;
-
- cap &= ~IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK;
- if (bw == IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ ||
- bw == IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ)
- cap |= IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ;
- }
-
- if (conn_flags & IEEE80211_CONN_DISABLE_160MHZ) {
+ if (conn->bw_limit <= IEEE80211_CONN_BW_LIMIT_80) {
cap &= ~IEEE80211_VHT_CAP_SHORT_GI_160;
cap &= ~IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK;
}
@@ -769,79 +1038,12 @@ static bool ieee80211_add_vht_ie(struct ieee80211_sub_if_data *sdata,
return mu_mimo_owner;
}
-/* This function determines HE capability flags for the association
- * and builds the IE.
- */
-static void ieee80211_add_he_ie(struct ieee80211_sub_if_data *sdata,
- struct sk_buff *skb,
- struct ieee80211_supported_band *sband,
- enum ieee80211_smps_mode smps_mode,
- ieee80211_conn_flags_t conn_flags)
-{
- u8 *pos, *pre_he_pos;
- const struct ieee80211_sta_he_cap *he_cap;
- u8 he_cap_size;
-
- he_cap = ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif);
- if (WARN_ON(!he_cap))
- return;
-
- /* get a max size estimate */
- he_cap_size =
- 2 + 1 + sizeof(he_cap->he_cap_elem) +
- ieee80211_he_mcs_nss_size(&he_cap->he_cap_elem) +
- ieee80211_he_ppe_size(he_cap->ppe_thres[0],
- he_cap->he_cap_elem.phy_cap_info);
- pos = skb_put(skb, he_cap_size);
- pre_he_pos = pos;
- pos = ieee80211_ie_build_he_cap(conn_flags,
- pos, he_cap, pos + he_cap_size);
- /* trim excess if any */
- skb_trim(skb, skb->len - (pre_he_pos + he_cap_size - pos));
-
- ieee80211_ie_build_he_6ghz_cap(sdata, smps_mode, skb);
-}
-
-static void ieee80211_add_eht_ie(struct ieee80211_sub_if_data *sdata,
- struct sk_buff *skb,
- struct ieee80211_supported_band *sband)
-{
- u8 *pos;
- const struct ieee80211_sta_he_cap *he_cap;
- const struct ieee80211_sta_eht_cap *eht_cap;
- u8 eht_cap_size;
-
- he_cap = ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif);
- eht_cap = ieee80211_get_eht_iftype_cap_vif(sband, &sdata->vif);
-
- /*
- * EHT capabilities element is only added if the HE capabilities element
- * was added so assume that 'he_cap' is valid and don't check it.
- */
- if (WARN_ON(!he_cap || !eht_cap))
- return;
-
- eht_cap_size =
- 2 + 1 + sizeof(eht_cap->eht_cap_elem) +
- ieee80211_eht_mcs_nss_size(&he_cap->he_cap_elem,
- &eht_cap->eht_cap_elem,
- false) +
- ieee80211_eht_ppe_size(eht_cap->eht_ppe_thres[0],
- eht_cap->eht_cap_elem.phy_cap_info);
- pos = skb_put(skb, eht_cap_size);
- ieee80211_ie_build_eht_cap(pos, he_cap, eht_cap, pos + eht_cap_size,
- false);
-}
-
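The removed builders used a reserve-build-trim pattern that the new ieee80211_put_he_cap()/ieee80211_put_eht_cap() helpers internalize: reserve a worst-case size estimate, build the element, then hand back the unused tail. A userspace sketch of the same pattern, with a plain buffer standing in for the skb:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

struct buf {
	uint8_t data[256];
	size_t len;
};

/* like skb_put(): reserve n bytes, return where they start */
static uint8_t *buf_put(struct buf *b, size_t n)
{
	uint8_t *p = b->data + b->len;

	b->len += n;
	return p;
}

/* like skb_trim(): drop the tail down to len */
static void buf_trim(struct buf *b, size_t len)
{
	b->len = len;
}

int main(void)
{
	struct buf b = { .len = 0 };
	size_t max_estimate = 32;
	uint8_t *start = buf_put(&b, max_estimate);
	uint8_t *end = start;

	*end++ = 0xff;	/* pretend the built element came out smaller */
	*end++ = 0x01;
	/* trim the excess, mirroring the removed skb_trim() expression */
	buf_trim(&b, b.len - (size_t)(start + max_estimate - end));

	printf("%zu\n", b.len);	/* prints 2 */
	return 0;
}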
static void ieee80211_assoc_add_rates(struct sk_buff *skb,
enum nl80211_chan_width width,
struct ieee80211_supported_band *sband,
struct ieee80211_mgd_assoc_data *assoc_data)
{
- unsigned int rates_len, supp_rates_len;
- u32 rates = 0;
- int i, count;
- u8 *pos;
+ u32 rates;
if (assoc_data->supp_rates_len) {
/*
@@ -850,53 +1052,23 @@ static void ieee80211_assoc_add_rates(struct sk_buff *skb,
* in the association request (e.g. D-Link DAP 1353 in
* b-only mode)...
*/
- rates_len = ieee80211_parse_bitrates(width, sband,
- assoc_data->supp_rates,
- assoc_data->supp_rates_len,
- &rates);
+ ieee80211_parse_bitrates(width, sband,
+ assoc_data->supp_rates,
+ assoc_data->supp_rates_len,
+ &rates);
} else {
/*
* In case the AP did not provide any supported rates information
* before association, we send information element(s) with
* all rates that we support.
*/
- rates_len = sband->n_bitrates;
- for (i = 0; i < sband->n_bitrates; i++)
- rates |= BIT(i);
- }
-
- supp_rates_len = rates_len;
- if (supp_rates_len > 8)
- supp_rates_len = 8;
-
- pos = skb_put(skb, supp_rates_len + 2);
- *pos++ = WLAN_EID_SUPP_RATES;
- *pos++ = supp_rates_len;
-
- count = 0;
- for (i = 0; i < sband->n_bitrates; i++) {
- if (BIT(i) & rates) {
- int rate = DIV_ROUND_UP(sband->bitrates[i].bitrate, 5);
- *pos++ = (u8)rate;
- if (++count == 8)
- break;
- }
+ rates = ~0;
}
- if (rates_len > count) {
- pos = skb_put(skb, rates_len - count + 2);
- *pos++ = WLAN_EID_EXT_SUPP_RATES;
- *pos++ = rates_len - count;
-
- for (i++; i < sband->n_bitrates; i++) {
- if (BIT(i) & rates) {
- int rate;
-
- rate = DIV_ROUND_UP(sband->bitrates[i].bitrate, 5);
- *pos++ = (u8)rate;
- }
- }
- }
+ ieee80211_put_srates_elem(skb, sband, 0, 0, ~rates,
+ WLAN_EID_SUPP_RATES);
+ ieee80211_put_srates_elem(skb, sband, 0, 0, ~rates,
+ WLAN_EID_EXT_SUPP_RATES);
}
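The rewrite keeps only the bitmap bookkeeping: the AP's advertised rates are collected as a bitmap over sband->bitrates, and the helper is handed the complement as a "rates to skip" mask, so rates = ~0 (no rate information from the AP) skips nothing. A minimal sketch of that selection (names are illustrative):

#include <stdint.h>
#include <stdio.h>

/* count the rates that survive the skip mask */
static int count_kept(uint32_t masked_rates, int n_bitrates)
{
	int i, count = 0;

	for (i = 0; i < n_bitrates; i++)
		if (!(masked_rates & (1u << i)))
			count++;
	return count;
}

int main(void)
{
	uint32_t rates = ~0u;	/* AP sent no rate info: keep everything */

	printf("%d\n", count_kept(~rates, 12));	/* prints 12 */
	return 0;
}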
static size_t ieee80211_add_before_ht_elems(struct sk_buff *skb,
@@ -1133,11 +1305,11 @@ static size_t ieee80211_assoc_link_elems(struct ieee80211_sub_if_data *sdata,
offset);
if (sband->band != NL80211_BAND_6GHZ &&
- !(assoc_data->link[link_id].conn_flags & IEEE80211_CONN_DISABLE_HT)) {
+ assoc_data->link[link_id].conn.mode >= IEEE80211_CONN_MODE_HT) {
ieee80211_add_ht_ie(sdata, skb,
assoc_data->link[link_id].ap_ht_param,
sband, chan, smps_mode,
- assoc_data->link[link_id].conn_flags);
+ &assoc_data->link[link_id].conn);
ADD_PRESENT_ELEM(WLAN_EID_HT_CAPABILITY);
}
@@ -1147,37 +1319,28 @@ static size_t ieee80211_assoc_link_elems(struct ieee80211_sub_if_data *sdata,
offset);
if (sband->band != NL80211_BAND_6GHZ &&
- !(assoc_data->link[link_id].conn_flags & IEEE80211_CONN_DISABLE_VHT)) {
+ assoc_data->link[link_id].conn.mode >= IEEE80211_CONN_MODE_VHT &&
+ sband->vht_cap.vht_supported) {
bool mu_mimo_owner =
ieee80211_add_vht_ie(sdata, skb, sband,
&assoc_data->link[link_id].ap_vht_cap,
- assoc_data->link[link_id].conn_flags);
+ &assoc_data->link[link_id].conn);
if (link)
link->conf->mu_mimo_owner = mu_mimo_owner;
ADD_PRESENT_ELEM(WLAN_EID_VHT_CAPABILITY);
}
- /*
- * If AP doesn't support HT, mark HE and EHT as disabled.
- * If on the 5GHz band, make sure it supports VHT.
- */
- if (assoc_data->link[link_id].conn_flags & IEEE80211_CONN_DISABLE_HT ||
- (sband->band == NL80211_BAND_5GHZ &&
- assoc_data->link[link_id].conn_flags & IEEE80211_CONN_DISABLE_VHT))
- assoc_data->link[link_id].conn_flags |=
- IEEE80211_CONN_DISABLE_HE |
- IEEE80211_CONN_DISABLE_EHT;
-
/* if present, add any custom IEs that go before HE */
offset = ieee80211_add_before_he_elems(skb, extra_elems,
extra_elems_len,
offset);
- if (!(assoc_data->link[link_id].conn_flags & IEEE80211_CONN_DISABLE_HE)) {
- ieee80211_add_he_ie(sdata, skb, sband, smps_mode,
- assoc_data->link[link_id].conn_flags);
+ if (assoc_data->link[link_id].conn.mode >= IEEE80211_CONN_MODE_HE) {
+ ieee80211_put_he_cap(skb, sdata, sband,
+ &assoc_data->link[link_id].conn);
ADD_PRESENT_EXT_ELEM(WLAN_EID_EXT_HE_CAPABILITY);
+ ieee80211_put_he_6ghz_cap(skb, sdata, smps_mode);
}
/*
@@ -1185,7 +1348,7 @@ static size_t ieee80211_assoc_link_elems(struct ieee80211_sub_if_data *sdata,
* calling ieee80211_assoc_add_ml_elem(), so add this one if
* we're going to put it after the ML element
*/
- if (!(assoc_data->link[link_id].conn_flags & IEEE80211_CONN_DISABLE_EHT))
+ if (assoc_data->link[link_id].conn.mode >= IEEE80211_CONN_MODE_EHT)
ADD_PRESENT_EXT_ELEM(WLAN_EID_EXT_EHT_CAPABILITY);
if (link_id == assoc_data->assoc_link_id)
@@ -1195,8 +1358,9 @@ static size_t ieee80211_assoc_link_elems(struct ieee80211_sub_if_data *sdata,
/* crash if somebody gets it wrong */
present_elems = NULL;
- if (!(assoc_data->link[link_id].conn_flags & IEEE80211_CONN_DISABLE_EHT))
- ieee80211_add_eht_ie(sdata, skb, sband);
+ if (assoc_data->link[link_id].conn.mode >= IEEE80211_CONN_MODE_EHT)
+ ieee80211_put_eht_cap(skb, sdata, sband,
+ &assoc_data->link[link_id].conn);
if (sband->band == NL80211_BAND_S1GHZ) {
ieee80211_add_aid_request_ie(sdata, skb);
@@ -1206,9 +1370,6 @@ static size_t ieee80211_assoc_link_elems(struct ieee80211_sub_if_data *sdata,
if (iftd && iftd->vendor_elems.data && iftd->vendor_elems.len)
skb_put_data(skb, iftd->vendor_elems.data, iftd->vendor_elems.len);
- if (link)
- link->u.mgd.conn_flags = assoc_data->link[link_id].conn_flags;
-
return offset;
}
@@ -1318,8 +1479,6 @@ static void ieee80211_assoc_add_ml_elem(struct ieee80211_sub_if_data *sdata,
cpu_to_le16(IEEE80211_MLC_BASIC_PRES_EML_CAPA);
skb_put_data(skb, &eml_capa, sizeof(eml_capa));
}
- /* need indication from userspace to support this */
- mld_capa_ops &= ~cpu_to_le16(IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_SUPP);
skb_put_data(skb, &mld_capa_ops, sizeof(mld_capa_ops));
for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) {
@@ -1499,7 +1658,7 @@ static int ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
/* Set MBSSID support for HE AP if needed */
if (ieee80211_hw_check(&local->hw, SUPPORTS_ONLY_HE_MULTI_BSSID) &&
- !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HE) &&
+ link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_HE &&
ext_capa && ext_capa->datalen >= 3)
ext_capa->data[2] |= WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT;
@@ -1544,7 +1703,7 @@ static int ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
* for some reason check it and want it to be set, set the bit for all
* pre-EHT connections as we used to do.
*/
- if (link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_EHT)
+ if (link->u.mgd.conn.mode < IEEE80211_CONN_MODE_EHT)
capab |= WLAN_CAPABILITY_ESS;
/* add the elements for the assoc (main) link */
@@ -1741,8 +1900,8 @@ static void ieee80211_chswitch_work(struct wiphy *wiphy,
return;
}
- if (!cfg80211_chandef_identical(&link->conf->chandef,
- &link->csa_chandef)) {
+ if (!ieee80211_chanreq_identical(&link->conf->chanreq,
+ &link->csa_chanreq)) {
sdata_info(sdata,
"failed to finalize channel switch, disconnecting\n");
wiphy_work_queue(sdata->local->hw.wiphy,
@@ -1767,19 +1926,14 @@ static void ieee80211_chswitch_post_beacon(struct ieee80211_link_data *link)
WARN_ON(!link->conf->csa_active);
- if (link->csa_block_tx) {
+ if (sdata->csa_blocked_tx) {
ieee80211_wake_vif_queues(local, sdata,
IEEE80211_QUEUE_STOP_REASON_CSA);
- link->csa_block_tx = false;
+ sdata->csa_blocked_tx = false;
}
link->conf->csa_active = false;
link->u.mgd.csa_waiting_bcn = false;
- /*
- * If the CSA IE is still present on the beacon after the switch,
- * we need to consider it as a new CSA (possibly to self).
- */
- link->u.mgd.beacon_crc_valid = false;
ret = drv_post_channel_switch(link);
if (ret) {
@@ -1790,8 +1944,8 @@ static void ieee80211_chswitch_post_beacon(struct ieee80211_link_data *link)
return;
}
- cfg80211_ch_switch_notify(sdata->dev, &link->reserved_chandef,
- link->link_id, 0);
+ cfg80211_ch_switch_notify(sdata->dev, &link->reserved.oper,
+ link->link_id);
}
void ieee80211_chswitch_done(struct ieee80211_vif *vif, bool success,
@@ -1838,14 +1992,15 @@ ieee80211_sta_abort_chanswitch(struct ieee80211_link_data *link)
ieee80211_link_unreserve_chanctx(link);
- if (link->csa_block_tx)
+ if (sdata->csa_blocked_tx) {
ieee80211_wake_vif_queues(local, sdata,
IEEE80211_QUEUE_STOP_REASON_CSA);
+ sdata->csa_blocked_tx = false;
+ }
- link->csa_block_tx = false;
link->conf->csa_active = false;
- drv_abort_channel_switch(sdata);
+ drv_abort_channel_switch(link);
}
static void
@@ -1857,12 +2012,14 @@ ieee80211_sta_process_chanswitch(struct ieee80211_link_data *link,
struct ieee80211_sub_if_data *sdata = link->sdata;
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
- struct cfg80211_bss *cbss = link->u.mgd.bss;
+ struct cfg80211_bss *cbss = link->conf->bss;
struct ieee80211_chanctx_conf *conf;
struct ieee80211_chanctx *chanctx;
enum nl80211_band current_band;
struct ieee80211_csa_ie csa_ie;
- struct ieee80211_channel_switch ch_switch;
+ struct ieee80211_channel_switch ch_switch = {
+ .link_id = link->link_id,
+ };
struct ieee80211_bss *bss;
unsigned long timeout;
int res;
@@ -1876,14 +2033,14 @@ ieee80211_sta_process_chanswitch(struct ieee80211_link_data *link,
bss = (void *)cbss->priv;
res = ieee80211_parse_ch_switch_ie(sdata, elems, current_band,
bss->vht_cap_info,
- link->u.mgd.conn_flags,
+ &link->u.mgd.conn,
link->u.mgd.bssid, &csa_ie);
if (!res) {
ch_switch.timestamp = timestamp;
ch_switch.device_timestamp = device_timestamp;
ch_switch.block_tx = csa_ie.mode;
- ch_switch.chandef = csa_ie.chandef;
+ ch_switch.chandef = csa_ie.chanreq.oper;
ch_switch.count = csa_ie.count;
ch_switch.delay = csa_ie.max_switch_time;
}
@@ -1891,46 +2048,62 @@ ieee80211_sta_process_chanswitch(struct ieee80211_link_data *link,
if (res < 0)
goto drop_connection;
- if (beacon && link->conf->csa_active &&
- !link->u.mgd.csa_waiting_bcn) {
- if (res)
+ if (link->conf->csa_active) {
+ /* already processing - disregard action frames */
+ if (!beacon)
+ return;
+
+ if (link->u.mgd.csa_waiting_bcn) {
+ ieee80211_chswitch_post_beacon(link);
+ /*
+ * If the CSA IE is still present in the beacon after
+ * the switch, we need to consider it as a new CSA
+ * (possibly to self) - this happens by not returning
+ * here so we'll get to the check below.
+ */
+ } else if (res) {
ieee80211_sta_abort_chanswitch(link);
- else
+ return;
+ } else {
drv_channel_switch_rx_beacon(sdata, &ch_switch);
- return;
- } else if (link->conf->csa_active || res) {
- /* disregard subsequent announcements if already processing */
- return;
+ return;
+ }
}
- if (link->conf->chandef.chan->band !=
- csa_ie.chandef.chan->band) {
+ /* nothing to do at all - no active CSA nor a new one */
+ if (res)
+ return;
+
+ if (link->conf->chanreq.oper.chan->band !=
+ csa_ie.chanreq.oper.chan->band) {
sdata_info(sdata,
"AP %pM switches to different band (%d MHz, width:%d, CF1/2: %d/%d MHz), disconnecting\n",
link->u.mgd.bssid,
- csa_ie.chandef.chan->center_freq,
- csa_ie.chandef.width, csa_ie.chandef.center_freq1,
- csa_ie.chandef.center_freq2);
+ csa_ie.chanreq.oper.chan->center_freq,
+ csa_ie.chanreq.oper.width,
+ csa_ie.chanreq.oper.center_freq1,
+ csa_ie.chanreq.oper.center_freq2);
goto drop_connection;
}
- if (!cfg80211_chandef_usable(local->hw.wiphy, &csa_ie.chandef,
+ if (!cfg80211_chandef_usable(local->hw.wiphy, &csa_ie.chanreq.oper,
IEEE80211_CHAN_DISABLED)) {
sdata_info(sdata,
"AP %pM switches to unsupported channel "
"(%d.%03d MHz, width:%d, CF1/2: %d.%03d/%d MHz), "
"disconnecting\n",
link->u.mgd.bssid,
- csa_ie.chandef.chan->center_freq,
- csa_ie.chandef.chan->freq_offset,
- csa_ie.chandef.width, csa_ie.chandef.center_freq1,
- csa_ie.chandef.freq1_offset,
- csa_ie.chandef.center_freq2);
+ csa_ie.chanreq.oper.chan->center_freq,
+ csa_ie.chanreq.oper.chan->freq_offset,
+ csa_ie.chanreq.oper.width,
+ csa_ie.chanreq.oper.center_freq1,
+ csa_ie.chanreq.oper.freq1_offset,
+ csa_ie.chanreq.oper.center_freq2);
goto drop_connection;
}
- if (cfg80211_chandef_identical(&csa_ie.chandef,
- &link->conf->chandef) &&
+ if (cfg80211_chandef_identical(&csa_ie.chanreq.oper,
+ &link->conf->chanreq.oper) &&
(!csa_ie.mode || !beacon)) {
if (link->u.mgd.csa_ignored_same_chan)
return;
@@ -1942,12 +2115,13 @@ ieee80211_sta_process_chanswitch(struct ieee80211_link_data *link,
}
/*
- * Drop all TDLS peers - either we disconnect or move to a different
- * channel from this point on. There's no telling what our peer will do.
+ * Drop all TDLS peers on the affected link - either we disconnect or
+ * move to a different channel from this point on. There's no telling
+ * what our peer will do.
* The TDLS WIDER_BW scenario is also problematic, as peers might now
* have an incompatible wider chandef.
*/
- ieee80211_teardown_tdls_peers(sdata);
+ ieee80211_teardown_tdls_peers(link);
conf = rcu_dereference_protected(link->conf->chanctx_conf,
lockdep_is_held(&local->hw.wiphy->mtx));
@@ -1959,8 +2133,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_link_data *link,
chanctx = container_of(conf, struct ieee80211_chanctx, conf);
- if (local->use_chanctx &&
- !ieee80211_hw_check(&local->hw, CHANCTX_STA_CSA)) {
+ if (!ieee80211_hw_check(&local->hw, CHANCTX_STA_CSA)) {
sdata_info(sdata,
"driver doesn't support chan-switch with channel contexts\n");
goto drop_connection;
@@ -1972,7 +2145,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_link_data *link,
goto drop_connection;
}
- res = ieee80211_link_reserve_chanctx(link, &csa_ie.chandef,
+ res = ieee80211_link_reserve_chanctx(link, &csa_ie.chanreq,
chanctx->mode, false);
if (res) {
sdata_info(sdata,
@@ -1982,18 +2155,20 @@ ieee80211_sta_process_chanswitch(struct ieee80211_link_data *link,
}
link->conf->csa_active = true;
- link->csa_chandef = csa_ie.chandef;
- link->csa_block_tx = csa_ie.mode;
+ link->csa_chanreq = csa_ie.chanreq;
link->u.mgd.csa_ignored_same_chan = false;
link->u.mgd.beacon_crc_valid = false;
- if (link->csa_block_tx)
+ if (csa_ie.mode &&
+ !ieee80211_hw_check(&local->hw, HANDLES_QUIET_CSA)) {
ieee80211_stop_vif_queues(local, sdata,
IEEE80211_QUEUE_STOP_REASON_CSA);
+ sdata->csa_blocked_tx = true;
+ }
- cfg80211_ch_switch_started_notify(sdata->dev, &csa_ie.chandef,
+ cfg80211_ch_switch_started_notify(sdata->dev, &csa_ie.chanreq.oper,
link->link_id, csa_ie.count,
- csa_ie.mode, 0);
+ csa_ie.mode);
if (local->ops->channel_switch) {
/* use driver's channel switch callback */
@@ -2017,7 +2192,8 @@ ieee80211_sta_process_chanswitch(struct ieee80211_link_data *link,
* reset when the disconnection worker runs.
*/
link->conf->csa_active = true;
- link->csa_block_tx = csa_ie.mode;
+ sdata->csa_blocked_tx =
+ csa_ie.mode && !ieee80211_hw_check(&local->hw, HANDLES_QUIET_CSA);
wiphy_work_queue(sdata->local->hw.wiphy,
&ifmgd->csa_connection_drop_work);
@@ -2414,7 +2590,7 @@ void ieee80211_dfs_cac_timer_work(struct wiphy *wiphy, struct wiphy_work *work)
struct ieee80211_link_data *link =
container_of(work, struct ieee80211_link_data,
dfs_cac_timer_work.work);
- struct cfg80211_chan_def chandef = link->conf->chandef;
+ struct cfg80211_chan_def chandef = link->conf->chanreq.oper;
struct ieee80211_sub_if_data *sdata = link->sdata;
lockdep_assert_wiphy(sdata->local->hw.wiphy);
@@ -2769,7 +2945,7 @@ static u64 ieee80211_link_set_associated(struct ieee80211_link_data *link,
ieee80211_check_rate_mask(link);
- link->u.mgd.bss = cbss;
+ link->conf->bss = cbss;
memcpy(link->u.mgd.bssid, cbss->bssid, ETH_ALEN);
if (sdata->vif.p2p ||
@@ -2917,7 +3093,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
ifmgd->associated = false;
/* other links will be destroyed */
- sdata->deflink.u.mgd.bss = NULL;
+ sdata->deflink.conf->bss = NULL;
sdata->deflink.smps_mode = IEEE80211_SMPS_OFF;
netif_carrier_off(sdata->dev);
@@ -2992,7 +3168,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
sdata->vif.cfg.ssid_len = 0;
/* remove AP and TDLS peers */
- sta_info_flush(sdata);
+ sta_info_flush(sdata, -1);
/* finally reset all BSS / config parameters */
if (!ieee80211_vif_is_mld(&sdata->vif))
@@ -3058,7 +3234,6 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
sdata->deflink.u.mgd.disable_wmm_tracking = false;
ifmgd->flags = 0;
- sdata->deflink.u.mgd.conn_flags = 0;
for (link_id = 0; link_id < ARRAY_SIZE(sdata->link); link_id++) {
struct ieee80211_link_data *link;
@@ -3072,25 +3247,35 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
sdata->vif.bss_conf.csa_active = false;
sdata->deflink.u.mgd.csa_waiting_bcn = false;
sdata->deflink.u.mgd.csa_ignored_same_chan = false;
- if (sdata->deflink.csa_block_tx) {
+ if (sdata->csa_blocked_tx) {
ieee80211_wake_vif_queues(local, sdata,
IEEE80211_QUEUE_STOP_REASON_CSA);
- sdata->deflink.csa_block_tx = false;
+ sdata->csa_blocked_tx = false;
}
/* existing TX TSPEC sessions no longer exist */
memset(ifmgd->tx_tspec, 0, sizeof(ifmgd->tx_tspec));
wiphy_delayed_work_cancel(local->hw.wiphy, &ifmgd->tx_tspec_wk);
+ sdata->vif.bss_conf.power_type = IEEE80211_REG_UNSET_AP;
sdata->vif.bss_conf.pwr_reduction = 0;
sdata->vif.bss_conf.tx_pwr_env_num = 0;
memset(sdata->vif.bss_conf.tx_pwr_env, 0,
sizeof(sdata->vif.bss_conf.tx_pwr_env));
+ sdata->vif.cfg.eml_cap = 0;
+ sdata->vif.cfg.eml_med_sync_delay = 0;
+ sdata->vif.cfg.mld_capa_op = 0;
+
memset(&sdata->u.mgd.ttlm_info, 0,
sizeof(sdata->u.mgd.ttlm_info));
wiphy_delayed_work_cancel(sdata->local->hw.wiphy, &ifmgd->ttlm_work);
+
+ wiphy_delayed_work_cancel(sdata->local->hw.wiphy,
+ &ifmgd->neg_ttlm_timeout_work);
ieee80211_vif_set_links(sdata, 0, 0);
+
+ ifmgd->mcast_seq_last = IEEE80211_SN_MODULO;
}
static void ieee80211_reset_ap_probe(struct ieee80211_sub_if_data *sdata)
@@ -3238,7 +3423,7 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
ieee80211_mlme_send_probe_req(sdata, sdata->vif.addr, dst,
sdata->vif.cfg.ssid,
sdata->vif.cfg.ssid_len,
- sdata->deflink.u.mgd.bss->channel);
+ sdata->deflink.conf->bss->channel);
}
ifmgd->probe_timeout = jiffies + msecs_to_jiffies(probe_wait_ms);
@@ -3321,7 +3506,7 @@ struct sk_buff *ieee80211_ap_probereq_get(struct ieee80211_hw *hw,
return NULL;
if (ifmgd->associated)
- cbss = sdata->deflink.u.mgd.bss;
+ cbss = sdata->deflink.conf->bss;
else if (ifmgd->auth_data)
cbss = ifmgd->auth_data->bss;
else if (ifmgd->assoc_data && ifmgd->assoc_data->link[0].bss)
@@ -3378,9 +3563,12 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
if (!ifmgd->associated)
return;
- /* in MLO assume we have a link where we can TX the frame */
- tx = ieee80211_vif_is_mld(&sdata->vif) ||
- !sdata->deflink.csa_block_tx;
+ /*
+ * MLO drivers should have HANDLES_QUIET_CSA, so that csa_blocked_tx
+ * is always false; if they don't then this may try to transmit the
+ * frame but queues will be stopped.
+ */
+ tx = !sdata->csa_blocked_tx;
if (!ifmgd->driver_disconnect) {
unsigned int link_id;
@@ -3400,8 +3588,8 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
link = sdata_dereference(sdata->link[link_id], sdata);
if (!link)
continue;
- cfg80211_unlink_bss(local->hw.wiphy, link->u.mgd.bss);
- link->u.mgd.bss = NULL;
+ cfg80211_unlink_bss(local->hw.wiphy, link->conf->bss);
+ link->conf->bss = NULL;
}
}
@@ -3413,10 +3601,10 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
/* the other links will be destroyed */
sdata->vif.bss_conf.csa_active = false;
sdata->deflink.u.mgd.csa_waiting_bcn = false;
- if (sdata->deflink.csa_block_tx) {
+ if (sdata->csa_blocked_tx) {
ieee80211_wake_vif_queues(local, sdata,
IEEE80211_QUEUE_STOP_REASON_CSA);
- sdata->deflink.csa_block_tx = false;
+ sdata->csa_blocked_tx = false;
}
ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), tx,
@@ -3518,7 +3706,6 @@ static void ieee80211_destroy_auth_data(struct ieee80211_sub_if_data *sdata,
sta_info_destroy_addr(sdata, auth_data->ap_addr);
/* other links are destroyed */
- sdata->deflink.u.mgd.conn_flags = 0;
eth_zero_addr(sdata->deflink.u.mgd.bssid);
ieee80211_link_info_change_notify(sdata, &sdata->deflink,
BSS_CHANGED_BSSID);
@@ -3556,7 +3743,6 @@ static void ieee80211_destroy_assoc_data(struct ieee80211_sub_if_data *sdata,
del_timer_sync(&sdata->u.mgd.timer);
sta_info_destroy_addr(sdata, assoc_data->ap_addr);
- sdata->deflink.u.mgd.conn_flags = 0;
eth_zero_addr(sdata->deflink.u.mgd.bssid);
ieee80211_link_info_change_notify(sdata, &sdata->deflink,
BSS_CHANGED_BSSID);
@@ -4006,11 +4192,13 @@ static bool ieee80211_assoc_config_link(struct ieee80211_link_data *link,
struct ieee80211_local *local = sdata->local;
unsigned int link_id = link->link_id;
struct ieee80211_elems_parse_params parse_params = {
+ .mode = link->u.mgd.conn.mode,
.start = elem_start,
.len = elem_len,
.link_id = link_id == assoc_data->assoc_link_id ? -1 : link_id,
.from_ap = true,
};
+ bool is_5ghz = cbss->channel->band == NL80211_BAND_5GHZ;
bool is_6ghz = cbss->channel->band == NL80211_BAND_6GHZ;
bool is_s1g = cbss->channel->band == NL80211_BAND_S1GHZ;
const struct cfg80211_bss_ies *bss_ies = NULL;
@@ -4034,15 +4222,17 @@ static bool ieee80211_assoc_config_link(struct ieee80211_link_data *link,
*/
assoc_data->link[link_id].status = WLAN_STATUS_SUCCESS;
if (elems->ml_basic) {
- if (!(elems->ml_basic->control &
- cpu_to_le16(IEEE80211_MLC_BASIC_PRES_BSS_PARAM_CH_CNT))) {
+ int bss_param_ch_cnt =
+ ieee80211_mle_get_bss_param_ch_cnt((const void *)elems->ml_basic);
+
+ if (bss_param_ch_cnt < 0) {
ret = false;
goto out;
}
- link->u.mgd.bss_param_ch_cnt =
- ieee80211_mle_get_bss_param_ch_cnt(elems->ml_basic);
+ link->u.mgd.bss_param_ch_cnt = bss_param_ch_cnt;
}
- } else if (!elems->prof ||
+ } else if (elems->parse_error & IEEE80211_PARSE_ERR_DUP_NEST_ML_BASIC ||
+ !elems->prof ||
!(elems->prof->control & prof_bss_param_ch_present)) {
ret = false;
goto out;
@@ -4086,9 +4276,9 @@ static bool ieee80211_assoc_config_link(struct ieee80211_link_data *link,
*/
if (!is_6ghz &&
((assoc_data->wmm && !elems->wmm_param) ||
- (!(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HT) &&
+ (link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_HT &&
(!elems->ht_cap_elem || !elems->ht_operation)) ||
- (!(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_VHT) &&
+ (link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_VHT &&
(!elems->vht_cap_elem || !elems->vht_operation)))) {
const struct cfg80211_bss_ies *ies;
struct ieee802_11_elems *bss_elems;
@@ -4125,25 +4315,25 @@ static bool ieee80211_assoc_config_link(struct ieee80211_link_data *link,
* have to include the IEs in the (re)association response.
*/
if (!elems->ht_cap_elem && bss_elems->ht_cap_elem &&
- !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HT)) {
+ link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_HT) {
elems->ht_cap_elem = bss_elems->ht_cap_elem;
sdata_info(sdata,
"AP bug: HT capability missing from AssocResp\n");
}
if (!elems->ht_operation && bss_elems->ht_operation &&
- !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HT)) {
+ link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_HT) {
elems->ht_operation = bss_elems->ht_operation;
sdata_info(sdata,
"AP bug: HT operation missing from AssocResp\n");
}
if (!elems->vht_cap_elem && bss_elems->vht_cap_elem &&
- !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_VHT)) {
+ link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_VHT) {
elems->vht_cap_elem = bss_elems->vht_cap_elem;
sdata_info(sdata,
"AP bug: VHT capa missing from AssocResp\n");
}
if (!elems->vht_operation && bss_elems->vht_operation &&
- !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_VHT)) {
+ link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_VHT) {
elems->vht_operation = bss_elems->vht_operation;
sdata_info(sdata,
"AP bug: VHT operation missing from AssocResp\n");
@@ -4155,8 +4345,10 @@ static bool ieee80211_assoc_config_link(struct ieee80211_link_data *link,
/*
* We previously checked these in the beacon/probe response, so
* they should be present here. This is just a safety net.
+ * Note that the ieee80211_config_bw() below would also check
+ * for this (and more), but this has better error reporting.
*/
- if (!is_6ghz && !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HT) &&
+ if (!is_6ghz && link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_HT &&
(!elems->wmm_param || !elems->ht_cap_elem || !elems->ht_operation)) {
sdata_info(sdata,
"HT AP is missing WMM params or HT capability/operation\n");
@@ -4164,7 +4356,7 @@ static bool ieee80211_assoc_config_link(struct ieee80211_link_data *link,
goto out;
}
- if (!is_6ghz && !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_VHT) &&
+ if (is_5ghz && link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_VHT &&
(!elems->vht_cap_elem || !elems->vht_operation)) {
sdata_info(sdata,
"VHT AP is missing VHT capability/operation\n");
@@ -4172,36 +4364,28 @@ static bool ieee80211_assoc_config_link(struct ieee80211_link_data *link,
goto out;
}
- if (is_6ghz && !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HE) &&
- !elems->he_6ghz_capa) {
- sdata_info(sdata,
- "HE 6 GHz AP is missing HE 6 GHz band capability\n");
- ret = false;
- goto out;
- }
-
- if (WARN_ON(!link->conf->chandef.chan)) {
+ /* check/update if AP changed anything in assoc response vs. scan */
+ if (ieee80211_config_bw(link, elems,
+ link_id == assoc_data->assoc_link_id,
+ changed)) {
ret = false;
goto out;
}
- sband = local->hw.wiphy->bands[link->conf->chandef.chan->band];
- if (!(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HE) &&
- (!elems->he_cap || !elems->he_operation)) {
- sdata_info(sdata,
- "HE AP is missing HE capability/operation\n");
+ if (WARN_ON(!link->conf->chanreq.oper.chan)) {
ret = false;
goto out;
}
+ sband = local->hw.wiphy->bands[link->conf->chanreq.oper.chan->band];
/* Set up internal HT/VHT capabilities */
- if (elems->ht_cap_elem && !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HT))
+ if (elems->ht_cap_elem && link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_HT)
ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
elems->ht_cap_elem,
link_sta);
if (elems->vht_cap_elem &&
- !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_VHT)) {
+ link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_VHT) {
const struct ieee80211_vht_cap *bss_vht_cap = NULL;
const struct cfg80211_bss_ies *ies;
@@ -4228,14 +4412,41 @@ static bool ieee80211_assoc_config_link(struct ieee80211_link_data *link,
rcu_read_unlock();
}
- if (elems->he_operation && !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HE) &&
+ if (elems->he_operation &&
+ link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_HE &&
elems->he_cap) {
+ const struct ieee80211_he_6ghz_oper *he_6ghz_oper;
+
ieee80211_he_cap_ie_to_sta_he_cap(sdata, sband,
elems->he_cap,
elems->he_cap_len,
elems->he_6ghz_capa,
link_sta);
+ he_6ghz_oper = ieee80211_he_6ghz_oper(elems->he_operation);
+
+ if (is_6ghz && he_6ghz_oper) {
+ switch (u8_get_bits(he_6ghz_oper->control,
+ IEEE80211_HE_6GHZ_OPER_CTRL_REG_INFO)) {
+ case IEEE80211_6GHZ_CTRL_REG_LPI_AP:
+ bss_conf->power_type = IEEE80211_REG_LPI_AP;
+ break;
+ case IEEE80211_6GHZ_CTRL_REG_SP_AP:
+ bss_conf->power_type = IEEE80211_REG_SP_AP;
+ break;
+ case IEEE80211_6GHZ_CTRL_REG_VLP_AP:
+ bss_conf->power_type = IEEE80211_REG_VLP_AP;
+ break;
+ default:
+ bss_conf->power_type = IEEE80211_REG_UNSET_AP;
+ break;
+ }
+ } else if (is_6ghz) {
+ link_info(link,
+ "HE 6 GHz operation missing (on %d MHz), expect issues\n",
+ bss_conf->chanreq.oper.chan->center_freq);
+ }
+
bss_conf->he_support = link_sta->pub->he_cap.has_he;
if (elems->rsnx && elems->rsnx_len &&
(elems->rsnx[0] & WLAN_RSNX_CAPA_PROTECTED_TWT) &&
@@ -4249,7 +4460,7 @@ static bool ieee80211_assoc_config_link(struct ieee80211_link_data *link,
link_sta, elems);
if (elems->eht_operation && elems->eht_cap &&
- !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_EHT)) {
+ link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_EHT) {
ieee80211_eht_cap_ie_to_sta_eht_cap(sdata, sband,
elems->he_cap,
elems->he_cap_len,
@@ -4258,7 +4469,6 @@ static bool ieee80211_assoc_config_link(struct ieee80211_link_data *link,
link_sta);
bss_conf->eht_support = link_sta->pub->eht_cap.has_eht;
- *changed |= BSS_CHANGED_EHT_PUNCTURING;
} else {
bss_conf->eht_support = false;
}
@@ -4456,7 +4666,7 @@ static u8 ieee80211_max_rx_chains(struct ieee80211_link_data *link,
bool support_160;
u8 chains = 1;
- if (link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HT)
+ if (link->u.mgd.conn.mode < IEEE80211_CONN_MODE_HT)
return chains;
ht_cap_elem = ieee80211_bss_get_elem(cbss, WLAN_EID_HT_CAPABILITY);
@@ -4469,7 +4679,7 @@ static u8 ieee80211_max_rx_chains(struct ieee80211_link_data *link,
*/
}
- if (link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_VHT)
+ if (link->u.mgd.conn.mode < IEEE80211_CONN_MODE_VHT)
return chains;
vht_cap_elem = ieee80211_bss_get_elem(cbss, WLAN_EID_VHT_CAPABILITY);
@@ -4488,7 +4698,7 @@ static u8 ieee80211_max_rx_chains(struct ieee80211_link_data *link,
chains = max(chains, nss);
}
- if (link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HE)
+ if (link->u.mgd.conn.mode < IEEE80211_CONN_MODE_HE)
return chains;
ies = rcu_dereference(cbss->ies);
@@ -4539,533 +4749,331 @@ static u8 ieee80211_max_rx_chains(struct ieee80211_link_data *link,
return chains;
}
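
The hunks above show a pattern that recurs throughout the patch: the old
per-feature IEEE80211_CONN_DISABLE_* flag tests become ordered comparisons
on conn.mode. A minimal standalone sketch (not part of the patch; the enum
is an illustrative copy of the idea, not the kernel's definition) of why a
strictly ordered mode enum lets a single ">=" or "<" replace independent
disable flags:

	#include <stdio.h>

	/* each mode implies support for all earlier ones */
	enum conn_mode {
		CONN_MODE_LEGACY,
		CONN_MODE_HT,
		CONN_MODE_VHT,
		CONN_MODE_HE,
		CONN_MODE_EHT,
	};

	static int uses_vht(enum conn_mode mode)
	{
		/* old style: !(flags & DISABLE_VHT); new style: */
		return mode >= CONN_MODE_VHT;
	}

	int main(void)
	{
		printf("HE conn uses VHT elements: %d\n", uses_vht(CONN_MODE_HE));
		printf("HT conn uses VHT elements: %d\n", uses_vht(CONN_MODE_HT));
		return 0;
	}
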
-static bool
-ieee80211_verify_peer_he_mcs_support(struct ieee80211_sub_if_data *sdata,
- const struct cfg80211_bss_ies *ies,
- const struct ieee80211_he_operation *he_op)
+static void
+ieee80211_determine_our_sta_mode(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_supported_band *sband,
+ struct cfg80211_assoc_request *req,
+ bool wmm_used, int link_id,
+ struct ieee80211_conn_settings *conn)
{
- const struct element *he_cap_elem;
- const struct ieee80211_he_cap_elem *he_cap;
- struct ieee80211_he_mcs_nss_supp *he_mcs_nss_supp;
- u16 mcs_80_map_tx, mcs_80_map_rx;
- u16 ap_min_req_set;
- int mcs_nss_size;
- int nss;
-
- he_cap_elem = cfg80211_find_ext_elem(WLAN_EID_EXT_HE_CAPABILITY,
- ies->data, ies->len);
-
- if (!he_cap_elem)
- return false;
+ struct ieee80211_sta_ht_cap sta_ht_cap = sband->ht_cap;
+ bool is_5ghz = sband->band == NL80211_BAND_5GHZ;
+ bool is_6ghz = sband->band == NL80211_BAND_6GHZ;
+ const struct ieee80211_sta_he_cap *he_cap;
+ const struct ieee80211_sta_eht_cap *eht_cap;
+ struct ieee80211_sta_vht_cap vht_cap;
- /* invalid HE IE */
- if (he_cap_elem->datalen < 1 + sizeof(*he_cap)) {
- sdata_info(sdata,
- "Invalid HE elem, Disable HE\n");
- return false;
+ if (sband->band == NL80211_BAND_S1GHZ) {
+ conn->mode = IEEE80211_CONN_MODE_S1G;
+ conn->bw_limit = IEEE80211_CONN_BW_LIMIT_20;
+ mlme_dbg(sdata, "operating as S1G STA\n");
+ return;
}
- /* skip one byte ext_tag_id */
- he_cap = (void *)(he_cap_elem->data + 1);
- mcs_nss_size = ieee80211_he_mcs_nss_size(he_cap);
+ conn->mode = IEEE80211_CONN_MODE_LEGACY;
+ conn->bw_limit = IEEE80211_CONN_BW_LIMIT_20;
- /* invalid HE IE */
- if (he_cap_elem->datalen < 1 + sizeof(*he_cap) + mcs_nss_size) {
- sdata_info(sdata,
- "Invalid HE elem with nss size, Disable HE\n");
- return false;
+ ieee80211_apply_htcap_overrides(sdata, &sta_ht_cap);
+
+ if (req && req->flags & ASSOC_REQ_DISABLE_HT) {
+ mlme_link_id_dbg(sdata, link_id,
+ "HT disabled by flag, limiting to legacy\n");
+ goto out;
}
- /* mcs_nss is right after he_cap info */
- he_mcs_nss_supp = (void *)(he_cap + 1);
+ if (!wmm_used) {
+ mlme_link_id_dbg(sdata, link_id,
+ "WMM/QoS not supported, limiting to legacy\n");
+ goto out;
+ }
- mcs_80_map_tx = le16_to_cpu(he_mcs_nss_supp->tx_mcs_80);
- mcs_80_map_rx = le16_to_cpu(he_mcs_nss_supp->rx_mcs_80);
+ if (req) {
+ unsigned int i;
- /* P802.11-REVme/D0.3
- * 27.1.1 Introduction to the HE PHY
- * ...
- * An HE STA shall support the following features:
- * ...
- * Single spatial stream HE-MCSs 0 to 7 (transmit and receive) in all
- * supported channel widths for HE SU PPDUs
- */
- if ((mcs_80_map_tx & 0x3) == IEEE80211_HE_MCS_NOT_SUPPORTED ||
- (mcs_80_map_rx & 0x3) == IEEE80211_HE_MCS_NOT_SUPPORTED) {
- sdata_info(sdata,
- "Missing mandatory rates for 1 Nss, rx 0x%x, tx 0x%x, disable HE\n",
- mcs_80_map_tx, mcs_80_map_rx);
- return false;
+ for (i = 0; i < req->crypto.n_ciphers_pairwise; i++) {
+ if (req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP40 ||
+ req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_TKIP ||
+ req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP104) {
+ netdev_info(sdata->dev,
+					    "WEP/TKIP in use, limiting to legacy\n");
+ goto out;
+ }
+ }
}
- if (!he_op)
- return true;
+ if (!sta_ht_cap.ht_supported && !is_6ghz) {
+ mlme_link_id_dbg(sdata, link_id,
+ "HT not supported (and not on 6 GHz), limiting to legacy\n");
+ goto out;
+ }
- ap_min_req_set = le16_to_cpu(he_op->he_mcs_nss_set);
+ /* HT is fine */
+ conn->mode = IEEE80211_CONN_MODE_HT;
+ conn->bw_limit = sta_ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 ?
+ IEEE80211_CONN_BW_LIMIT_40 :
+ IEEE80211_CONN_BW_LIMIT_20;
- /*
- * Apparently iPhone 13 (at least iOS version 15.3.1) sets this to all
- * zeroes, which is nonsense, and completely inconsistent with itself
- * (it doesn't have 8 streams). Accept the settings in this case anyway.
- */
- if (!ap_min_req_set)
- return true;
-
- /* make sure the AP is consistent with itself
- *
- * P802.11-REVme/D0.3
- * 26.17.1 Basic HE BSS operation
- *
- * A STA that is operating in an HE BSS shall be able to receive and
- * transmit at each of the <HE-MCS, NSS> tuple values indicated by the
- * Basic HE-MCS And NSS Set field of the HE Operation parameter of the
- * MLME-START.request primitive and shall be able to receive at each of
- * the <HE-MCS, NSS> tuple values indicated by the Supported HE-MCS and
-	 * NSS Set field in the HE Capabilities parameter of the MLME-START.request
- * primitive
- */
- for (nss = 8; nss > 0; nss--) {
- u8 ap_op_val = (ap_min_req_set >> (2 * (nss - 1))) & 3;
- u8 ap_rx_val;
- u8 ap_tx_val;
+ memcpy(&vht_cap, &sband->vht_cap, sizeof(vht_cap));
+ ieee80211_apply_vhtcap_overrides(sdata, &vht_cap);
- if (ap_op_val == IEEE80211_HE_MCS_NOT_SUPPORTED)
- continue;
+ if (req && req->flags & ASSOC_REQ_DISABLE_VHT) {
+ mlme_link_id_dbg(sdata, link_id,
+ "VHT disabled by flag, limiting to HT\n");
+ goto out;
+ }
- ap_rx_val = (mcs_80_map_rx >> (2 * (nss - 1))) & 3;
- ap_tx_val = (mcs_80_map_tx >> (2 * (nss - 1))) & 3;
+ if (vht_cap.vht_supported && is_5ghz) {
+ bool have_80mhz = false;
+ unsigned int i;
- if (ap_rx_val == IEEE80211_HE_MCS_NOT_SUPPORTED ||
- ap_tx_val == IEEE80211_HE_MCS_NOT_SUPPORTED ||
- ap_rx_val < ap_op_val || ap_tx_val < ap_op_val) {
- sdata_info(sdata,
- "Invalid rates for %d Nss, rx %d, tx %d oper %d, disable HE\n",
-				   nss, ap_rx_val, ap_tx_val, ap_op_val);
- return false;
+ if (conn->bw_limit == IEEE80211_CONN_BW_LIMIT_20) {
+ mlme_link_id_dbg(sdata, link_id,
+ "no 40 MHz support on 5 GHz, limiting to HT\n");
+ goto out;
}
- }
- return true;
-}
+ /* Allow VHT if at least one channel on the sband supports 80 MHz */
+ for (i = 0; i < sband->n_channels; i++) {
+ if (sband->channels[i].flags & (IEEE80211_CHAN_DISABLED |
+ IEEE80211_CHAN_NO_80MHZ))
+ continue;
-static bool
-ieee80211_verify_sta_he_mcs_support(struct ieee80211_sub_if_data *sdata,
- struct ieee80211_supported_band *sband,
- const struct ieee80211_he_operation *he_op)
-{
- const struct ieee80211_sta_he_cap *sta_he_cap =
- ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif);
- u16 ap_min_req_set;
- int i;
+ have_80mhz = true;
+ break;
+ }
- if (!sta_he_cap || !he_op)
- return false;
+ if (!have_80mhz) {
+ mlme_link_id_dbg(sdata, link_id,
+ "no 80 MHz channel support on 5 GHz, limiting to HT\n");
+ goto out;
+ }
+ } else if (is_5ghz) { /* !vht_supported but on 5 GHz */
+ mlme_link_id_dbg(sdata, link_id,
+ "no VHT support on 5 GHz, limiting to HT\n");
+ goto out;
+ }
- ap_min_req_set = le16_to_cpu(he_op->he_mcs_nss_set);
+	/* VHT, if we have it, is fine, including 80 MHz; check 160 again below */
+ if (sband->band != NL80211_BAND_2GHZ) {
+ conn->mode = IEEE80211_CONN_MODE_VHT;
+ conn->bw_limit = IEEE80211_CONN_BW_LIMIT_160;
+ }
- /*
- * Apparently iPhone 13 (at least iOS version 15.3.1) sets this to all
- * zeroes, which is nonsense, and completely inconsistent with itself
- * (it doesn't have 8 streams). Accept the settings in this case anyway.
- */
- if (!ap_min_req_set)
- return true;
+ if (is_5ghz &&
+ !(vht_cap.cap & (IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ |
+ IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ))) {
+ conn->bw_limit = IEEE80211_CONN_BW_LIMIT_80;
+ mlme_link_id_dbg(sdata, link_id,
+				 "no VHT 160 MHz capability on 5 GHz, limiting to 80 MHz\n");
+ }
- /* Need to go over for 80MHz, 160MHz and for 80+80 */
- for (i = 0; i < 3; i++) {
- const struct ieee80211_he_mcs_nss_supp *sta_mcs_nss_supp =
- &sta_he_cap->he_mcs_nss_supp;
- u16 sta_mcs_map_rx =
- le16_to_cpu(((__le16 *)sta_mcs_nss_supp)[2 * i]);
- u16 sta_mcs_map_tx =
- le16_to_cpu(((__le16 *)sta_mcs_nss_supp)[2 * i + 1]);
- u8 nss;
- bool verified = true;
+ if (req && req->flags & ASSOC_REQ_DISABLE_HE) {
+ mlme_link_id_dbg(sdata, link_id,
+ "HE disabled by flag, limiting to HT/VHT\n");
+ goto out;
+ }
- /*
- * For each band there is a maximum of 8 spatial streams
- * possible. Each of the sta_mcs_map_* is a 16-bit struct built
- * of 2 bits per NSS (1-8), with the values defined in enum
- * ieee80211_he_mcs_support. Need to make sure STA TX and RX
- * capabilities aren't less than the AP's minimum requirements
- * for this HE BSS per SS.
- * It is enough to find one such band that meets the reqs.
- */
- for (nss = 8; nss > 0; nss--) {
- u8 sta_rx_val = (sta_mcs_map_rx >> (2 * (nss - 1))) & 3;
- u8 sta_tx_val = (sta_mcs_map_tx >> (2 * (nss - 1))) & 3;
- u8 ap_val = (ap_min_req_set >> (2 * (nss - 1))) & 3;
+ he_cap = ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif);
+ if (!he_cap) {
+ WARN_ON(is_6ghz);
+ mlme_link_id_dbg(sdata, link_id,
+ "no HE support, limiting to HT/VHT\n");
+ goto out;
+ }
- if (ap_val == IEEE80211_HE_MCS_NOT_SUPPORTED)
- continue;
+ /* so we have HE */
+ conn->mode = IEEE80211_CONN_MODE_HE;
- /*
- * Make sure the HE AP doesn't require MCSs that aren't
- * supported by the client as required by spec
- *
- * P802.11-REVme/D0.3
- * 26.17.1 Basic HE BSS operation
- *
-	 * An HE STA shall not attempt to join (MLME-JOIN.request primitive)
- * a BSS, unless it supports (i.e., is able to both transmit and
- * receive using) all of the <HE-MCS, NSS> tuples in the basic
- * HE-MCS and NSS set.
- */
- if (sta_rx_val == IEEE80211_HE_MCS_NOT_SUPPORTED ||
- sta_tx_val == IEEE80211_HE_MCS_NOT_SUPPORTED ||
- (ap_val > sta_rx_val) || (ap_val > sta_tx_val)) {
- verified = false;
- break;
- }
+ /* check bandwidth */
+ switch (sband->band) {
+ default:
+ case NL80211_BAND_2GHZ:
+ if (he_cap->he_cap_elem.phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G)
+ break;
+ conn->bw_limit = IEEE80211_CONN_BW_LIMIT_20;
+ mlme_link_id_dbg(sdata, link_id,
+ "no 40 MHz HE cap in 2.4 GHz, limiting to 20 MHz\n");
+ break;
+ case NL80211_BAND_5GHZ:
+ if (!(he_cap->he_cap_elem.phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G)) {
+ conn->bw_limit = IEEE80211_CONN_BW_LIMIT_20;
+ mlme_link_id_dbg(sdata, link_id,
+ "no 40/80 MHz HE cap in 5 GHz, limiting to 20 MHz\n");
+ break;
}
-
- if (verified)
- return true;
+ if (!(he_cap->he_cap_elem.phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G)) {
+ conn->bw_limit = min_t(enum ieee80211_conn_bw_limit,
+ conn->bw_limit,
+ IEEE80211_CONN_BW_LIMIT_80);
+ mlme_link_id_dbg(sdata, link_id,
+ "no 160 MHz HE cap in 5 GHz, limiting to 80 MHz\n");
+ }
+ break;
+ case NL80211_BAND_6GHZ:
+ if (he_cap->he_cap_elem.phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G)
+ break;
+ conn->bw_limit = min_t(enum ieee80211_conn_bw_limit,
+ conn->bw_limit,
+ IEEE80211_CONN_BW_LIMIT_80);
+ mlme_link_id_dbg(sdata, link_id,
+ "no 160 MHz HE cap in 6 GHz, limiting to 80 MHz\n");
+ break;
}
- /* If here, STA doesn't meet AP's HE min requirements */
- return false;
-}
+ if (req && req->flags & ASSOC_REQ_DISABLE_EHT) {
+ mlme_link_id_dbg(sdata, link_id,
+ "EHT disabled by flag, limiting to HE\n");
+ goto out;
+ }
-static u8
-ieee80211_get_eht_cap_mcs_nss(const struct ieee80211_sta_he_cap *sta_he_cap,
- const struct ieee80211_sta_eht_cap *sta_eht_cap,
- unsigned int idx, int bw)
-{
- u8 he_phy_cap0 = sta_he_cap->he_cap_elem.phy_cap_info[0];
- u8 eht_phy_cap0 = sta_eht_cap->eht_cap_elem.phy_cap_info[0];
+ eht_cap = ieee80211_get_eht_iftype_cap_vif(sband, &sdata->vif);
+ if (!eht_cap) {
+ mlme_link_id_dbg(sdata, link_id,
+ "no EHT support, limiting to HE\n");
+ goto out;
+ }
- /* handle us being a 20 MHz-only EHT STA - with four values
- * for MCS 0-7, 8-9, 10-11, 12-13.
- */
- if (!(he_phy_cap0 & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK_ALL))
- return sta_eht_cap->eht_mcs_nss_supp.only_20mhz.rx_tx_max_nss[idx];
+ /* we have EHT */
- /* the others have MCS 0-9 together, rather than separately from 0-7 */
- if (idx > 0)
- idx--;
+ conn->mode = IEEE80211_CONN_MODE_EHT;
- switch (bw) {
- case 0:
- return sta_eht_cap->eht_mcs_nss_supp.bw._80.rx_tx_max_nss[idx];
- case 1:
- if (!(he_phy_cap0 &
- (IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G |
- IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)))
- return 0xff; /* pass check */
- return sta_eht_cap->eht_mcs_nss_supp.bw._160.rx_tx_max_nss[idx];
- case 2:
- if (!(eht_phy_cap0 & IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ))
- return 0xff; /* pass check */
- return sta_eht_cap->eht_mcs_nss_supp.bw._320.rx_tx_max_nss[idx];
- }
+ /* check bandwidth */
+ if (is_6ghz &&
+ eht_cap->eht_cap_elem.phy_cap_info[0] & IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ)
+ conn->bw_limit = IEEE80211_CONN_BW_LIMIT_320;
+ else if (is_6ghz)
+ mlme_link_id_dbg(sdata, link_id,
+ "no EHT 320 MHz cap in 6 GHz, limiting to 160 MHz\n");
- WARN_ON(1);
- return 0;
+out:
+ mlme_link_id_dbg(sdata, link_id,
+ "determined local STA to be %s, BW limited to %d MHz\n",
+ ieee80211_conn_mode_str(conn->mode),
+ 20 * (1 << conn->bw_limit));
}
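
The closing debug message converts the bandwidth limit to MHz as
20 * (1 << bw_limit). A quick standalone check of that formula, assuming
the limits are numbered 0 through 4 for 20 through 320 MHz as the print
implies:

	#include <stdio.h>

	enum conn_bw_limit {
		CONN_BW_LIMIT_20,
		CONN_BW_LIMIT_40,
		CONN_BW_LIMIT_80,
		CONN_BW_LIMIT_160,
		CONN_BW_LIMIT_320,
	};

	int main(void)
	{
		for (int l = CONN_BW_LIMIT_20; l <= CONN_BW_LIMIT_320; l++)
			printf("limit %d -> %d MHz\n", l, 20 * (1 << l));
		/* prints 20, 40, 80, 160, 320 */
		return 0;
	}
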
-static bool
-ieee80211_verify_sta_eht_mcs_support(struct ieee80211_sub_if_data *sdata,
- struct ieee80211_supported_band *sband,
- const struct ieee80211_eht_operation *eht_op)
+static void
+ieee80211_determine_our_sta_mode_auth(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_supported_band *sband,
+ struct cfg80211_auth_request *req,
+ bool wmm_used,
+ struct ieee80211_conn_settings *conn)
{
- const struct ieee80211_sta_he_cap *sta_he_cap =
- ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif);
- const struct ieee80211_sta_eht_cap *sta_eht_cap =
- ieee80211_get_eht_iftype_cap_vif(sband, &sdata->vif);
- const struct ieee80211_eht_mcs_nss_supp_20mhz_only *req;
- unsigned int i;
-
- if (!sta_he_cap || !sta_eht_cap || !eht_op)
- return false;
-
- req = &eht_op->basic_mcs_nss;
-
- for (i = 0; i < ARRAY_SIZE(req->rx_tx_max_nss); i++) {
- u8 req_rx_nss, req_tx_nss;
- unsigned int bw;
-
- req_rx_nss = u8_get_bits(req->rx_tx_max_nss[i],
- IEEE80211_EHT_MCS_NSS_RX);
- req_tx_nss = u8_get_bits(req->rx_tx_max_nss[i],
- IEEE80211_EHT_MCS_NSS_TX);
+ ieee80211_determine_our_sta_mode(sdata, sband, NULL, wmm_used,
+ req->link_id > 0 ? req->link_id : 0,
+ conn);
+}
- for (bw = 0; bw < 3; bw++) {
- u8 have, have_rx_nss, have_tx_nss;
+static void
+ieee80211_determine_our_sta_mode_assoc(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_supported_band *sband,
+ struct cfg80211_assoc_request *req,
+ bool wmm_used, int link_id,
+ struct ieee80211_conn_settings *conn)
+{
+ struct ieee80211_conn_settings tmp;
- have = ieee80211_get_eht_cap_mcs_nss(sta_he_cap,
- sta_eht_cap,
- i, bw);
- have_rx_nss = u8_get_bits(have,
- IEEE80211_EHT_MCS_NSS_RX);
- have_tx_nss = u8_get_bits(have,
- IEEE80211_EHT_MCS_NSS_TX);
+ WARN_ON(!req);
- if (req_rx_nss > have_rx_nss ||
- req_tx_nss > have_tx_nss)
- return false;
- }
- }
+ ieee80211_determine_our_sta_mode(sdata, sband, req, wmm_used, link_id,
+ &tmp);
- return true;
+ conn->mode = min_t(enum ieee80211_conn_mode,
+ conn->mode, tmp.mode);
+ conn->bw_limit = min_t(enum ieee80211_conn_bw_limit,
+ conn->bw_limit, tmp.bw_limit);
}
static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
struct ieee80211_link_data *link,
- struct cfg80211_bss *cbss,
- bool mlo,
- ieee80211_conn_flags_t *conn_flags)
+ int link_id,
+ struct cfg80211_bss *cbss, bool mlo,
+ struct ieee80211_conn_settings *conn)
{
struct ieee80211_local *local = sdata->local;
- const struct ieee80211_ht_cap *ht_cap = NULL;
- const struct ieee80211_ht_operation *ht_oper = NULL;
- const struct ieee80211_vht_operation *vht_oper = NULL;
- const struct ieee80211_he_operation *he_oper = NULL;
- const struct ieee80211_eht_operation *eht_oper = NULL;
- const struct ieee80211_s1g_oper_ie *s1g_oper = NULL;
- struct ieee80211_supported_band *sband;
- struct cfg80211_chan_def chandef;
bool is_6ghz = cbss->channel->band == NL80211_BAND_6GHZ;
- bool is_5ghz = cbss->channel->band == NL80211_BAND_5GHZ;
- bool supports_mlo = false;
- struct ieee80211_bss *bss = (void *)cbss->priv;
- struct ieee80211_elems_parse_params parse_params = {
- .link_id = -1,
- .from_ap = true,
- };
+ struct ieee80211_chan_req chanreq = {};
struct ieee802_11_elems *elems;
- const struct cfg80211_bss_ies *ies;
int ret;
u32 i;
- bool have_80mhz;
lockdep_assert_wiphy(local->hw.wiphy);
rcu_read_lock();
+ elems = ieee80211_determine_chan_mode(sdata, conn, cbss, link_id,
+ &chanreq);
- ies = rcu_dereference(cbss->ies);
- parse_params.start = ies->data;
- parse_params.len = ies->len;
- elems = ieee802_11_parse_elems_full(&parse_params);
- if (!elems) {
+ if (IS_ERR(elems)) {
rcu_read_unlock();
- return -ENOMEM;
- }
-
- sband = local->hw.wiphy->bands[cbss->channel->band];
-
- *conn_flags &= ~(IEEE80211_CONN_DISABLE_40MHZ |
- IEEE80211_CONN_DISABLE_80P80MHZ |
- IEEE80211_CONN_DISABLE_160MHZ);
-
- /* disable HT/VHT/HE if we don't support them */
- if (!sband->ht_cap.ht_supported && !is_6ghz) {
- mlme_dbg(sdata, "HT not supported, disabling HT/VHT/HE/EHT\n");
- *conn_flags |= IEEE80211_CONN_DISABLE_HT;
- *conn_flags |= IEEE80211_CONN_DISABLE_VHT;
- *conn_flags |= IEEE80211_CONN_DISABLE_HE;
- *conn_flags |= IEEE80211_CONN_DISABLE_EHT;
+ return PTR_ERR(elems);
}
- if (!sband->vht_cap.vht_supported && is_5ghz) {
- mlme_dbg(sdata, "VHT not supported, disabling VHT/HE/EHT\n");
- *conn_flags |= IEEE80211_CONN_DISABLE_VHT;
- *conn_flags |= IEEE80211_CONN_DISABLE_HE;
- *conn_flags |= IEEE80211_CONN_DISABLE_EHT;
- }
-
- if (!ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif)) {
- mlme_dbg(sdata, "HE not supported, disabling HE and EHT\n");
- *conn_flags |= IEEE80211_CONN_DISABLE_HE;
- *conn_flags |= IEEE80211_CONN_DISABLE_EHT;
- }
-
- if (!ieee80211_get_eht_iftype_cap_vif(sband, &sdata->vif)) {
- mlme_dbg(sdata, "EHT not supported, disabling EHT\n");
- *conn_flags |= IEEE80211_CONN_DISABLE_EHT;
- }
-
- if (!(*conn_flags & IEEE80211_CONN_DISABLE_HT) && !is_6ghz) {
- ht_oper = elems->ht_operation;
- ht_cap = elems->ht_cap_elem;
-
- if (!ht_cap) {
- *conn_flags |= IEEE80211_CONN_DISABLE_HT;
- ht_oper = NULL;
- }
- }
-
- if (!(*conn_flags & IEEE80211_CONN_DISABLE_VHT) && !is_6ghz) {
- vht_oper = elems->vht_operation;
- if (vht_oper && !ht_oper) {
- vht_oper = NULL;
- sdata_info(sdata,
- "AP advertised VHT without HT, disabling HT/VHT/HE\n");
- *conn_flags |= IEEE80211_CONN_DISABLE_HT;
- *conn_flags |= IEEE80211_CONN_DISABLE_VHT;
- *conn_flags |= IEEE80211_CONN_DISABLE_HE;
- *conn_flags |= IEEE80211_CONN_DISABLE_EHT;
- }
-
- if (!elems->vht_cap_elem) {
- *conn_flags |= IEEE80211_CONN_DISABLE_VHT;
- vht_oper = NULL;
- }
+ if (mlo && !elems->ml_basic) {
+ sdata_info(sdata, "Rejecting MLO as it is not supported by AP\n");
+ rcu_read_unlock();
+ kfree(elems);
+ return -EINVAL;
}
- if (!(*conn_flags & IEEE80211_CONN_DISABLE_HE)) {
- he_oper = elems->he_operation;
-
- if (link && is_6ghz) {
- struct ieee80211_bss_conf *bss_conf;
- u8 j = 0;
+ if (link && is_6ghz && conn->mode >= IEEE80211_CONN_MODE_HE) {
+ struct ieee80211_bss_conf *bss_conf;
+ u8 j = 0;
- bss_conf = link->conf;
+ bss_conf = link->conf;
- if (elems->pwr_constr_elem)
- bss_conf->pwr_reduction = *elems->pwr_constr_elem;
+ if (elems->pwr_constr_elem)
+ bss_conf->pwr_reduction = *elems->pwr_constr_elem;
- BUILD_BUG_ON(ARRAY_SIZE(bss_conf->tx_pwr_env) !=
- ARRAY_SIZE(elems->tx_pwr_env));
-
- for (i = 0; i < elems->tx_pwr_env_num; i++) {
- if (elems->tx_pwr_env_len[i] >
- sizeof(bss_conf->tx_pwr_env[j]))
- continue;
+ BUILD_BUG_ON(ARRAY_SIZE(bss_conf->tx_pwr_env) !=
+ ARRAY_SIZE(elems->tx_pwr_env));
- bss_conf->tx_pwr_env_num++;
- memcpy(&bss_conf->tx_pwr_env[j], elems->tx_pwr_env[i],
- elems->tx_pwr_env_len[i]);
- j++;
- }
- }
-
- if (!ieee80211_verify_peer_he_mcs_support(sdata, ies, he_oper) ||
- !ieee80211_verify_sta_he_mcs_support(sdata, sband, he_oper))
- *conn_flags |= IEEE80211_CONN_DISABLE_HE |
- IEEE80211_CONN_DISABLE_EHT;
- }
-
- /*
- * EHT requires HE to be supported as well. Specifically for 6 GHz
- * channels, the operation channel information can only be deduced from
- * both the 6 GHz operation information (from the HE operation IE) and
- * EHT operation.
- */
- if (!(*conn_flags &
- (IEEE80211_CONN_DISABLE_HE |
- IEEE80211_CONN_DISABLE_EHT)) &&
- he_oper) {
- const struct cfg80211_bss_ies *cbss_ies;
- const struct element *eht_ml_elem;
- const u8 *eht_oper_ie;
-
- cbss_ies = rcu_dereference(cbss->ies);
- eht_oper_ie = cfg80211_find_ext_ie(WLAN_EID_EXT_EHT_OPERATION,
- cbss_ies->data, cbss_ies->len);
- if (eht_oper_ie && eht_oper_ie[1] >=
- 1 + sizeof(struct ieee80211_eht_operation))
- eht_oper = (void *)(eht_oper_ie + 3);
- else
- eht_oper = NULL;
-
- if (!ieee80211_verify_sta_eht_mcs_support(sdata, sband, eht_oper))
- *conn_flags |= IEEE80211_CONN_DISABLE_EHT;
-
- eht_ml_elem = cfg80211_find_ext_elem(WLAN_EID_EXT_EHT_MULTI_LINK,
- cbss_ies->data, cbss_ies->len);
-
- /* data + 1 / datalen - 1 since it's an extended element */
- if (!(*conn_flags & IEEE80211_CONN_DISABLE_EHT) &&
- eht_ml_elem &&
- ieee80211_mle_type_ok(eht_ml_elem->data + 1,
- IEEE80211_ML_CONTROL_TYPE_BASIC,
- eht_ml_elem->datalen - 1)) {
- supports_mlo = true;
+ for (i = 0; i < elems->tx_pwr_env_num; i++) {
+ if (elems->tx_pwr_env_len[i] > sizeof(bss_conf->tx_pwr_env[j]))
+ continue;
- sdata->vif.cfg.eml_cap =
- ieee80211_mle_get_eml_cap(eht_ml_elem->data + 1);
- sdata->vif.cfg.eml_med_sync_delay =
- ieee80211_mle_get_eml_med_sync_delay(eht_ml_elem->data + 1);
+ bss_conf->tx_pwr_env_num++;
+ memcpy(&bss_conf->tx_pwr_env[j], elems->tx_pwr_env[i],
+ elems->tx_pwr_env_len[i]);
+ j++;
}
}
-
- /* Allow VHT if at least one channel on the sband supports 80 MHz */
- have_80mhz = false;
- for (i = 0; i < sband->n_channels; i++) {
- if (sband->channels[i].flags & (IEEE80211_CHAN_DISABLED |
- IEEE80211_CHAN_NO_80MHZ))
- continue;
-
- have_80mhz = true;
- break;
- }
-
- if (!have_80mhz) {
- sdata_info(sdata, "80 MHz not supported, disabling VHT\n");
- *conn_flags |= IEEE80211_CONN_DISABLE_VHT;
- }
-
- if (sband->band == NL80211_BAND_S1GHZ) {
- s1g_oper = elems->s1g_oper;
- if (!s1g_oper)
- sdata_info(sdata,
- "AP missing S1G operation element?\n");
- }
-
- *conn_flags |=
- ieee80211_determine_chantype(sdata, link, *conn_flags,
- sband,
- cbss->channel,
- bss->vht_cap_info,
- ht_oper, vht_oper,
- he_oper, eht_oper,
- s1g_oper,
- &chandef, false);
-
- if (link)
- link->needed_rx_chains =
- min(ieee80211_max_rx_chains(link, cbss),
- local->rx_chains);
-
rcu_read_unlock();
/* the element data was RCU protected so no longer valid anyway */
kfree(elems);
elems = NULL;
- if (*conn_flags & IEEE80211_CONN_DISABLE_HE && is_6ghz) {
- sdata_info(sdata, "Rejecting non-HE 6/7 GHz connection");
- return -EINVAL;
- }
-
- if (mlo && !supports_mlo) {
- sdata_info(sdata, "Rejecting MLO as it is not supported by AP\n");
- return -EINVAL;
- }
-
if (!link)
return 0;
+ rcu_read_lock();
+ link->needed_rx_chains = min(ieee80211_max_rx_chains(link, cbss),
+ local->rx_chains);
+ rcu_read_unlock();
+
/*
* If this fails (possibly due to channel context sharing
* on incompatible channels, e.g. 80+80 and 160 sharing the
* same control channel) try to use a smaller bandwidth.
*/
- ret = ieee80211_link_use_channel(link, &chandef,
+ ret = ieee80211_link_use_channel(link, &chanreq,
IEEE80211_CHANCTX_SHARED);
/* don't downgrade for 5 and 10 MHz channels, though. */
- if (chandef.width == NL80211_CHAN_WIDTH_5 ||
- chandef.width == NL80211_CHAN_WIDTH_10)
- goto out;
+ if (chanreq.oper.width == NL80211_CHAN_WIDTH_5 ||
+ chanreq.oper.width == NL80211_CHAN_WIDTH_10)
+ return ret;
+
+ while (ret && chanreq.oper.width != NL80211_CHAN_WIDTH_20_NOHT) {
+ ieee80211_chanreq_downgrade(&chanreq, conn);
- while (ret && chandef.width != NL80211_CHAN_WIDTH_20_NOHT) {
- *conn_flags |=
- ieee80211_chandef_downgrade(&chandef);
- ret = ieee80211_link_use_channel(link, &chandef,
+ ret = ieee80211_link_use_channel(link, &chanreq,
IEEE80211_CHANCTX_SHARED);
}
- out:
+
return ret;
}
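
The loop above retries ieee80211_link_use_channel() with a progressively
downgraded channel request until it succeeds or reaches 20 MHz. A userspace
sketch of the same retry-then-downgrade pattern; try_use() and the width
list are illustrative stand-ins, not kernel APIs:

	#include <stdio.h>

	static const int widths[] = { 320, 160, 80, 40, 20 };

	/* pretend the shared channel context only accepts 80 MHz or less */
	static int try_use(int mhz)
	{
		return mhz <= 80 ? 0 : -1; /* 0 means success, like ret above */
	}

	int main(void)
	{
		int ret = -1;

		for (unsigned int i = 0; i < sizeof(widths) / sizeof(widths[0]); i++) {
			ret = try_use(widths[i]);
			if (!ret) {
				printf("using %d MHz\n", widths[i]);
				break;
			}
			printf("%d MHz failed, downgrading\n", widths[i]);
		}
		return ret ? 1 : 0;
	}
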
@@ -5126,6 +5134,8 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
if (WARN_ON(!sta))
goto out_err;
+ sta->sta.spp_amsdu = assoc_data->spp_amsdu;
+
if (ieee80211_vif_is_mld(&sdata->vif)) {
for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) {
if (!assoc_data->link[link_id].bss)
@@ -5189,8 +5199,10 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
link->conf->dtim_period = link->u.mgd.dtim_period ?: 1;
if (link_id != assoc_data->assoc_link_id) {
- err = ieee80211_prep_channel(sdata, link, cbss, true,
- &link->u.mgd.conn_flags);
+ link->u.mgd.conn = assoc_data->link[link_id].conn;
+
+ err = ieee80211_prep_channel(sdata, link, link_id, cbss,
+ true, &link->u.mgd.conn);
if (err) {
link_info(link, "prep_channel failed\n");
goto out_err;
@@ -5308,6 +5320,9 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
if (!assoc_data)
return;
+ parse_params.mode =
+ assoc_data->link[assoc_data->assoc_link_id].conn.mode;
+
if (!ether_addr_equal(assoc_data->ap_addr, mgmt->bssid) ||
!ether_addr_equal(assoc_data->ap_addr, mgmt->sa))
return;
@@ -5424,6 +5439,13 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
assoc_data->ap_addr);
goto abandon_assoc;
}
+
+ sdata->vif.cfg.eml_cap =
+ ieee80211_mle_get_eml_cap((const void *)elems->ml_basic);
+ sdata->vif.cfg.eml_med_sync_delay =
+ ieee80211_mle_get_eml_med_sync_delay((const void *)elems->ml_basic);
+ sdata->vif.cfg.mld_capa_op =
+ ieee80211_mle_get_mld_capa_op((const void *)elems->ml_basic);
}
sdata->vif.cfg.aid = aid;
@@ -5686,49 +5708,6 @@ static bool ieee80211_rx_our_beacon(const u8 *tx_bssid,
return ether_addr_equal(tx_bssid, bss->transmitted_bss->bssid);
}
-static bool ieee80211_config_puncturing(struct ieee80211_link_data *link,
- const struct ieee80211_eht_operation *eht_oper,
- u64 *changed)
-{
- struct ieee80211_local *local = link->sdata->local;
- u16 bitmap = 0, extracted;
-
- if ((eht_oper->params & IEEE80211_EHT_OPER_INFO_PRESENT) &&
- (eht_oper->params &
- IEEE80211_EHT_OPER_DISABLED_SUBCHANNEL_BITMAP_PRESENT)) {
- const struct ieee80211_eht_operation_info *info =
- (void *)eht_oper->optional;
- const u8 *disable_subchannel_bitmap = info->optional;
-
- bitmap = get_unaligned_le16(disable_subchannel_bitmap);
- }
-
- extracted = ieee80211_extract_dis_subch_bmap(eht_oper,
- &link->conf->chandef,
- bitmap);
-
- /* accept if there are no changes */
- if (!(*changed & BSS_CHANGED_BANDWIDTH) &&
- extracted == link->conf->eht_puncturing)
- return true;
-
- if (!cfg80211_valid_disable_subchannel_bitmap(&bitmap,
- &link->conf->chandef)) {
- link_info(link,
- "Got an invalid disable subchannel bitmap from AP %pM: bitmap = 0x%x, bw = 0x%x. disconnect\n",
- link->u.mgd.bssid,
- bitmap,
- link->conf->chandef.width);
- return false;
- }
-
- if (bitmap && ieee80211_hw_check(&local->hw, DISALLOW_PUNCTURING))
- return false;
-
- ieee80211_handle_puncturing_bitmap(link, eht_oper, bitmap, changed);
- return true;
-}
-
static void ieee80211_ml_reconf_work(struct wiphy *wiphy,
struct wiphy_work *work)
{
@@ -5792,9 +5771,7 @@ out:
static void ieee80211_ml_reconfiguration(struct ieee80211_sub_if_data *sdata,
struct ieee802_11_elems *elems)
{
- const struct ieee80211_multi_link_elem *ml;
const struct element *sub;
- ssize_t ml_len;
unsigned long removed_links = 0;
u16 link_removal_timeout[IEEE80211_MLD_MAX_NUM_LINKS] = {};
u8 link_id;
@@ -5803,24 +5780,11 @@ static void ieee80211_ml_reconfiguration(struct ieee80211_sub_if_data *sdata,
if (!ieee80211_vif_is_mld(&sdata->vif) || !elems->ml_reconf)
return;
- ml_len = cfg80211_defragment_element(elems->ml_reconf_elem,
- elems->ie_start,
- elems->total_len,
- elems->scratch_pos,
- elems->scratch + elems->scratch_len -
- elems->scratch_pos,
- WLAN_EID_FRAGMENT);
- if (ml_len < 0)
- return;
-
- elems->ml_reconf = (const void *)elems->scratch_pos;
- elems->ml_reconf_len = ml_len;
- ml = elems->ml_reconf;
-
	/* Directly parse the sub elements, as the common information
	 * doesn't hold anything useful.
	 */
- for_each_mle_subelement(sub, (u8 *)ml, ml_len) {
+ for_each_mle_subelement(sub, (const u8 *)elems->ml_reconf,
+ elems->ml_reconf_len) {
struct ieee80211_mle_per_sta_profile *prof = (void *)sub->data;
u8 *pos = prof->variable;
u16 control;
@@ -5888,6 +5852,56 @@ static void ieee80211_ml_reconfiguration(struct ieee80211_sub_if_data *sdata,
TU_TO_JIFFIES(delay));
}
+static int ieee80211_ttlm_set_links(struct ieee80211_sub_if_data *sdata,
+ u16 active_links, u16 dormant_links,
+ u16 suspended_links)
+{
+ u64 changed = 0;
+ int ret;
+
+ if (!active_links) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+	/* If there is an active negotiated TTLM, it must be discarded and
+	 * superseded by the new negotiated/advertised TTLM.
+	 */
+ if (sdata->vif.neg_ttlm.valid) {
+ memset(&sdata->vif.neg_ttlm, 0, sizeof(sdata->vif.neg_ttlm));
+ sdata->vif.suspended_links = 0;
+ changed = BSS_CHANGED_MLD_TTLM;
+ }
+
+ if (sdata->vif.active_links != active_links) {
+ ret = ieee80211_set_active_links(&sdata->vif, active_links);
+ if (ret) {
+ sdata_info(sdata, "Failed to set TTLM active links\n");
+ goto out;
+ }
+ }
+
+ ret = ieee80211_vif_set_links(sdata, sdata->vif.valid_links,
+ dormant_links);
+ if (ret) {
+ sdata_info(sdata, "Failed to set TTLM dormant links\n");
+ goto out;
+ }
+
+ changed |= BSS_CHANGED_MLD_VALID_LINKS;
+ sdata->vif.suspended_links = suspended_links;
+ if (sdata->vif.suspended_links)
+ changed |= BSS_CHANGED_MLD_TTLM;
+
+ ieee80211_vif_cfg_change_notify(sdata, changed);
+
+out:
+ if (ret)
+ ieee80211_disconnect(&sdata->vif, false);
+
+ return ret;
+}
+
static void ieee80211_tid_to_link_map_work(struct wiphy *wiphy,
struct wiphy_work *work)
{
@@ -5895,30 +5909,19 @@ static void ieee80211_tid_to_link_map_work(struct wiphy *wiphy,
struct ieee80211_sub_if_data *sdata =
container_of(work, struct ieee80211_sub_if_data,
u.mgd.ttlm_work.work);
- int ret;
new_active_links = sdata->u.mgd.ttlm_info.map &
sdata->vif.valid_links;
new_dormant_links = ~sdata->u.mgd.ttlm_info.map &
sdata->vif.valid_links;
- if (!new_active_links) {
- ieee80211_disconnect(&sdata->vif, false);
- return;
- }
ieee80211_vif_set_links(sdata, sdata->vif.valid_links, 0);
- new_active_links = BIT(ffs(new_active_links) - 1);
- ieee80211_set_active_links(&sdata->vif, new_active_links);
-
- ret = ieee80211_vif_set_links(sdata, sdata->vif.valid_links,
- new_dormant_links);
+ if (ieee80211_ttlm_set_links(sdata, new_active_links, new_dormant_links,
+ 0))
+ return;
sdata->u.mgd.ttlm_info.active = true;
sdata->u.mgd.ttlm_info.switch_time = 0;
-
- if (!ret)
- ieee80211_vif_cfg_change_notify(sdata,
- BSS_CHANGED_MLD_VALID_LINKS);
}
static u16 ieee80211_get_ttlm(u8 bm_size, u8 *data)
@@ -6128,6 +6131,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link,
u8 *bssid, *variable = mgmt->u.beacon.variable;
u8 deauth_buf[IEEE80211_DEAUTH_FRAME_LEN];
struct ieee80211_elems_parse_params parse_params = {
+ .mode = link->u.mgd.conn.mode,
.link_id = -1,
.from_ap = true,
};
@@ -6210,7 +6214,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link,
}
if (!ifmgd->associated ||
- !ieee80211_rx_our_beacon(bssid, link->u.mgd.bss))
+ !ieee80211_rx_our_beacon(bssid, link->conf->bss))
return;
bssid = link->u.mgd.bssid;
@@ -6237,7 +6241,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link,
*/
if (!ieee80211_is_s1g_beacon(hdr->frame_control))
ncrc = crc32_be(0, (void *)&mgmt->u.beacon.beacon_int, 4);
- parse_params.bss = link->u.mgd.bss;
+ parse_params.bss = link->conf->bss;
parse_params.filter = care_about_ies;
parse_params.crc = ncrc;
elems = ieee802_11_parse_elems_full(&parse_params);
@@ -6299,9 +6303,6 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link,
}
}
- if (link->u.mgd.csa_waiting_bcn)
- ieee80211_chswitch_post_beacon(link);
-
/*
* Update beacon timing and dtim count on every beacon appearance. This
* will allow the driver to use the most updated values. Do it before
@@ -6375,21 +6376,14 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link,
goto free;
}
- if (WARN_ON(!link->conf->chandef.chan))
+ if (WARN_ON(!link->conf->chanreq.oper.chan))
goto free;
- sband = local->hw.wiphy->bands[link->conf->chandef.chan->band];
+ sband = local->hw.wiphy->bands[link->conf->chanreq.oper.chan->band];
changed |= ieee80211_recalc_twt_req(sdata, sband, link, link_sta, elems);
- if (ieee80211_config_bw(link, elems->ht_cap_elem,
- elems->vht_cap_elem, elems->ht_operation,
- elems->vht_operation, elems->he_operation,
- elems->eht_operation,
- elems->s1g_oper, bssid, &changed)) {
- sdata_info(sdata,
- "failed to follow AP %pM bandwidth change, disconnect\n",
- bssid);
+ if (ieee80211_config_bw(link, elems, true, &changed)) {
ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
WLAN_REASON_DEAUTH_LEAVING,
true, deauth_buf);
@@ -6411,21 +6405,6 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link,
elems->pwr_constr_elem,
elems->cisco_dtpc_elem);
- if (elems->eht_operation &&
- !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_EHT)) {
- if (!ieee80211_config_puncturing(link, elems->eht_operation,
- &changed)) {
- ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
- WLAN_REASON_DEAUTH_LEAVING,
- true, deauth_buf);
- ieee80211_report_disconnect(sdata, deauth_buf,
- sizeof(deauth_buf), true,
- WLAN_REASON_DEAUTH_LEAVING,
- false);
- goto free;
- }
- }
-
ieee80211_ml_reconfiguration(sdata, elems);
ieee80211_process_adv_ttlm(sdata, elems,
le64_to_cpu(mgmt->u.beacon.timestamp));
@@ -6435,6 +6414,376 @@ free:
kfree(elems);
}
+static void ieee80211_apply_neg_ttlm(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_neg_ttlm neg_ttlm)
+{
+ u16 new_active_links, new_dormant_links, new_suspended_links, map = 0;
+ u8 i;
+
+ for (i = 0; i < IEEE80211_TTLM_NUM_TIDS; i++)
+ map |= neg_ttlm.downlink[i] | neg_ttlm.uplink[i];
+
+ /* If there is an active TTLM, unset previously suspended links */
+ if (sdata->vif.neg_ttlm.valid)
+ sdata->vif.dormant_links &= ~sdata->vif.suspended_links;
+
+ /* exclude links that are already disabled by advertised TTLM */
+ new_active_links =
+ map & sdata->vif.valid_links & ~sdata->vif.dormant_links;
+ new_suspended_links =
+ (~map & sdata->vif.valid_links) & ~sdata->vif.dormant_links;
+ new_dormant_links = sdata->vif.dormant_links | new_suspended_links;
+ if (ieee80211_ttlm_set_links(sdata, new_active_links,
+ new_dormant_links, new_suspended_links))
+ return;
+
+ sdata->vif.neg_ttlm = neg_ttlm;
+ sdata->vif.neg_ttlm.valid = true;
+}
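
The bitmask arithmetic in ieee80211_apply_neg_ttlm() partitions valid_links
into active, suspended, and dormant sets. A standalone sketch with made-up
link masks showing the same derivation:

	#include <stdio.h>

	int main(void)
	{
		unsigned int valid_links = 0x7;   /* links 0..2 exist */
		unsigned int dormant_links = 0x4; /* link 2 disabled by advertised TTLM */
		unsigned int map = 0x1;           /* negotiated TTLM maps all TIDs to link 0 */

		unsigned int active = map & valid_links & ~dormant_links;
		unsigned int suspended = (~map & valid_links) & ~dormant_links;
		unsigned int dormant = dormant_links | suspended;

		/* prints active=0x1 suspended=0x2 dormant=0x6 */
		printf("active=0x%x suspended=0x%x dormant=0x%x\n",
		       active, suspended, dormant);
		return 0;
	}
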
+
+static void ieee80211_neg_ttlm_timeout_work(struct wiphy *wiphy,
+ struct wiphy_work *work)
+{
+ struct ieee80211_sub_if_data *sdata =
+ container_of(work, struct ieee80211_sub_if_data,
+ u.mgd.neg_ttlm_timeout_work.work);
+
+ sdata_info(sdata,
+ "No negotiated TTLM response from AP, disconnecting.\n");
+
+ __ieee80211_disconnect(sdata);
+}
+
+static void
+ieee80211_neg_ttlm_add_suggested_map(struct sk_buff *skb,
+ struct ieee80211_neg_ttlm *neg_ttlm)
+{
+ u8 i, direction[IEEE80211_TTLM_MAX_CNT];
+
+ if (memcmp(neg_ttlm->downlink, neg_ttlm->uplink,
+ sizeof(neg_ttlm->downlink))) {
+ direction[0] = IEEE80211_TTLM_DIRECTION_DOWN;
+ direction[1] = IEEE80211_TTLM_DIRECTION_UP;
+ } else {
+ direction[0] = IEEE80211_TTLM_DIRECTION_BOTH;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(direction); i++) {
+ u8 tid, len, map_ind = 0, *len_pos, *map_ind_pos, *pos;
+ __le16 map;
+
+ len = sizeof(struct ieee80211_ttlm_elem) + 1 + 1;
+
+ pos = skb_put(skb, len + 2);
+ *pos++ = WLAN_EID_EXTENSION;
+ len_pos = pos++;
+ *pos++ = WLAN_EID_EXT_TID_TO_LINK_MAPPING;
+ *pos++ = direction[i];
+ map_ind_pos = pos++;
+ for (tid = 0; tid < IEEE80211_TTLM_NUM_TIDS; tid++) {
+ map = direction[i] == IEEE80211_TTLM_DIRECTION_UP ?
+ cpu_to_le16(neg_ttlm->uplink[tid]) :
+ cpu_to_le16(neg_ttlm->downlink[tid]);
+ if (!map)
+ continue;
+
+ len += 2;
+ map_ind |= BIT(tid);
+ skb_put_data(skb, &map, sizeof(map));
+ }
+
+ *map_ind_pos = map_ind;
+ *len_pos = len;
+
+ if (direction[i] == IEEE80211_TTLM_DIRECTION_BOTH)
+ break;
+ }
+}
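
The builder above emits, per direction: the extension EID, a length, the
extension tag, one control byte carrying the direction, a link mapping
presence indicator, then one little-endian map per flagged TID. A sketch
of the resulting bytes for a single both-directions element; the EID
values match the kernel's definitions, while the direction encoding
(2 for "both") and the map contents are illustrative assumptions:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint8_t elem[] = {
			255,        /* WLAN_EID_EXTENSION */
			5,          /* length, counted from the extension tag */
			109,        /* WLAN_EID_EXT_TID_TO_LINK_MAPPING */
			2,          /* control: direction = both */
			0x01,       /* presence indicator: only TID 0 has a map */
			0x03, 0x00, /* TID 0 map, little endian: links 0 and 1 */
		};

		for (unsigned int i = 0; i < sizeof(elem); i++)
			printf("%02x ", elem[i]);
		printf("\n");
		return 0;
	}
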
+
+static void
+ieee80211_send_neg_ttlm_req(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_neg_ttlm *neg_ttlm,
+ u8 dialog_token)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_mgmt *mgmt;
+ struct sk_buff *skb;
+ int hdr_len = offsetofend(struct ieee80211_mgmt, u.action.u.ttlm_req);
+ int ttlm_max_len = 2 + 1 + sizeof(struct ieee80211_ttlm_elem) + 1 +
+ 2 * 2 * IEEE80211_TTLM_NUM_TIDS;
+
+ skb = dev_alloc_skb(local->tx_headroom + hdr_len + ttlm_max_len);
+ if (!skb)
+ return;
+
+ skb_reserve(skb, local->tx_headroom);
+ mgmt = skb_put_zero(skb, hdr_len);
+ mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+ IEEE80211_STYPE_ACTION);
+ memcpy(mgmt->da, sdata->vif.cfg.ap_addr, ETH_ALEN);
+ memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
+ memcpy(mgmt->bssid, sdata->vif.cfg.ap_addr, ETH_ALEN);
+
+ mgmt->u.action.category = WLAN_CATEGORY_PROTECTED_EHT;
+ mgmt->u.action.u.ttlm_req.action_code =
+ WLAN_PROTECTED_EHT_ACTION_TTLM_REQ;
+ mgmt->u.action.u.ttlm_req.dialog_token = dialog_token;
+ ieee80211_neg_ttlm_add_suggested_map(skb, neg_ttlm);
+ ieee80211_tx_skb(sdata, skb);
+}
+
+int ieee80211_req_neg_ttlm(struct ieee80211_sub_if_data *sdata,
+ struct cfg80211_ttlm_params *params)
+{
+ struct ieee80211_neg_ttlm neg_ttlm = {};
+ u8 i;
+
+ if (!ieee80211_vif_is_mld(&sdata->vif) ||
+ !(sdata->vif.cfg.mld_capa_op &
+ IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_SUPP))
+ return -EINVAL;
+
+ for (i = 0; i < IEEE80211_TTLM_NUM_TIDS; i++) {
+ if ((params->dlink[i] & ~sdata->vif.valid_links) ||
+ (params->ulink[i] & ~sdata->vif.valid_links))
+ return -EINVAL;
+
+ neg_ttlm.downlink[i] = params->dlink[i];
+ neg_ttlm.uplink[i] = params->ulink[i];
+ }
+
+ if (drv_can_neg_ttlm(sdata->local, sdata, &neg_ttlm) !=
+ NEG_TTLM_RES_ACCEPT)
+ return -EINVAL;
+
+ ieee80211_apply_neg_ttlm(sdata, neg_ttlm);
+ sdata->u.mgd.dialog_token_alloc++;
+ ieee80211_send_neg_ttlm_req(sdata, &sdata->vif.neg_ttlm,
+ sdata->u.mgd.dialog_token_alloc);
+ wiphy_delayed_work_cancel(sdata->local->hw.wiphy,
+ &sdata->u.mgd.neg_ttlm_timeout_work);
+ wiphy_delayed_work_queue(sdata->local->hw.wiphy,
+ &sdata->u.mgd.neg_ttlm_timeout_work,
+ IEEE80211_NEG_TTLM_REQ_TIMEOUT);
+ return 0;
+}
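
The validation loop above rejects any TID whose requested link set strays
outside valid_links. An equivalent standalone check with hypothetical
masks; NUM_TIDS stands in for IEEE80211_TTLM_NUM_TIDS:

	#include <stdio.h>
	#include <stdint.h>

	#define NUM_TIDS 8

	static int maps_valid(const uint16_t *dlink, const uint16_t *ulink,
			      uint16_t valid_links)
	{
		for (int i = 0; i < NUM_TIDS; i++) {
			/* any bit outside valid_links invalidates the request */
			if ((dlink[i] & ~valid_links) || (ulink[i] & ~valid_links))
				return 0;
		}
		return 1;
	}

	int main(void)
	{
		uint16_t dl[NUM_TIDS] = { 0x3, 0x3, 0x3, 0x3, 0x1, 0x1, 0x1, 0x1 };
		uint16_t ul[NUM_TIDS] = { 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3 };

		printf("ok with links 0-1: %d\n", maps_valid(dl, ul, 0x3));
		printf("ok with link 0 only: %d\n", maps_valid(dl, ul, 0x1));
		return 0;
	}
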
+
+static void
+ieee80211_send_neg_ttlm_res(struct ieee80211_sub_if_data *sdata,
+ enum ieee80211_neg_ttlm_res ttlm_res,
+ u8 dialog_token,
+ struct ieee80211_neg_ttlm *neg_ttlm)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_mgmt *mgmt;
+ struct sk_buff *skb;
+ int hdr_len = offsetofend(struct ieee80211_mgmt, u.action.u.ttlm_res);
+ int ttlm_max_len = 2 + 1 + sizeof(struct ieee80211_ttlm_elem) + 1 +
+ 2 * 2 * IEEE80211_TTLM_NUM_TIDS;
+
+ skb = dev_alloc_skb(local->tx_headroom + hdr_len + ttlm_max_len);
+ if (!skb)
+ return;
+
+ skb_reserve(skb, local->tx_headroom);
+ mgmt = skb_put_zero(skb, hdr_len);
+ mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+ IEEE80211_STYPE_ACTION);
+ memcpy(mgmt->da, sdata->vif.cfg.ap_addr, ETH_ALEN);
+ memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
+ memcpy(mgmt->bssid, sdata->vif.cfg.ap_addr, ETH_ALEN);
+
+ mgmt->u.action.category = WLAN_CATEGORY_PROTECTED_EHT;
+ mgmt->u.action.u.ttlm_res.action_code =
+ WLAN_PROTECTED_EHT_ACTION_TTLM_RES;
+ mgmt->u.action.u.ttlm_res.dialog_token = dialog_token;
+ switch (ttlm_res) {
+ default:
+ WARN_ON(1);
+ fallthrough;
+ case NEG_TTLM_RES_REJECT:
+ mgmt->u.action.u.ttlm_res.status_code =
+ WLAN_STATUS_DENIED_TID_TO_LINK_MAPPING;
+ break;
+ case NEG_TTLM_RES_ACCEPT:
+ mgmt->u.action.u.ttlm_res.status_code = WLAN_STATUS_SUCCESS;
+ break;
+ case NEG_TTLM_RES_SUGGEST_PREFERRED:
+ mgmt->u.action.u.ttlm_res.status_code =
+ WLAN_STATUS_PREF_TID_TO_LINK_MAPPING_SUGGESTED;
+ ieee80211_neg_ttlm_add_suggested_map(skb, neg_ttlm);
+ break;
+ }
+
+ ieee80211_tx_skb(sdata, skb);
+}
+
+static int
+ieee80211_parse_neg_ttlm(struct ieee80211_sub_if_data *sdata,
+ const struct ieee80211_ttlm_elem *ttlm,
+ struct ieee80211_neg_ttlm *neg_ttlm,
+ u8 *direction)
+{
+ u8 control, link_map_presence, map_size, tid;
+ u8 *pos;
+
+ /* The element size was already validated in
+ * ieee80211_tid_to_link_map_size_ok()
+ */
+ pos = (void *)ttlm->optional;
+
+ control = ttlm->control;
+
+	/* the mapping switch time and expected duration fields are not
+	 * expected in a negotiated TTLM
+ */
+ if (control & (IEEE80211_TTLM_CONTROL_SWITCH_TIME_PRESENT |
+ IEEE80211_TTLM_CONTROL_EXPECTED_DUR_PRESENT)) {
+ mlme_dbg(sdata,
+ "Invalid TTLM element in negotiated TTLM request\n");
+ return -EINVAL;
+ }
+
+ if (control & IEEE80211_TTLM_CONTROL_DEF_LINK_MAP) {
+ for (tid = 0; tid < IEEE80211_TTLM_NUM_TIDS; tid++) {
+ neg_ttlm->downlink[tid] = sdata->vif.valid_links;
+ neg_ttlm->uplink[tid] = sdata->vif.valid_links;
+ }
+ *direction = IEEE80211_TTLM_DIRECTION_BOTH;
+ return 0;
+ }
+
+ *direction = u8_get_bits(control, IEEE80211_TTLM_CONTROL_DIRECTION);
+ if (*direction != IEEE80211_TTLM_DIRECTION_DOWN &&
+ *direction != IEEE80211_TTLM_DIRECTION_UP &&
+ *direction != IEEE80211_TTLM_DIRECTION_BOTH)
+ return -EINVAL;
+
+ link_map_presence = *pos;
+ pos++;
+
+ if (control & IEEE80211_TTLM_CONTROL_LINK_MAP_SIZE)
+ map_size = 1;
+ else
+ map_size = 2;
+
+ for (tid = 0; tid < IEEE80211_TTLM_NUM_TIDS; tid++) {
+ u16 map;
+
+ if (link_map_presence & BIT(tid)) {
+ map = ieee80211_get_ttlm(map_size, pos);
+ if (!map) {
+ mlme_dbg(sdata,
+					 "No active links for TID %d\n", tid);
+ return -EINVAL;
+ }
+ } else {
+ map = 0;
+ }
+
+ switch (*direction) {
+ case IEEE80211_TTLM_DIRECTION_BOTH:
+ neg_ttlm->downlink[tid] = map;
+ neg_ttlm->uplink[tid] = map;
+ break;
+ case IEEE80211_TTLM_DIRECTION_DOWN:
+ neg_ttlm->downlink[tid] = map;
+ break;
+ case IEEE80211_TTLM_DIRECTION_UP:
+ neg_ttlm->uplink[tid] = map;
+ break;
+ default:
+ return -EINVAL;
+ }
+ pos += map_size;
+ }
+ return 0;
+}
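
The parser above walks one little-endian map per TID, present only when the
corresponding presence bit is set, with the map width chosen by a control
bit. A minimal standalone decoder following the same layout; the buffer
contents are made up:

	#include <stdio.h>
	#include <stdint.h>

	#define NUM_TIDS 8

	static uint16_t get_map(int size, const uint8_t *p)
	{
		/* little endian, 1 or 2 bytes, like ieee80211_get_ttlm() */
		return size == 1 ? p[0] : (uint16_t)(p[0] | (p[1] << 8));
	}

	int main(void)
	{
		/* presence bitmap 0x09: TIDs 0 and 3 carry explicit maps */
		uint8_t buf[] = { 0x09, 0x01, 0x00, 0x02, 0x00 };
		const uint8_t *pos = buf + 1;
		int map_size = 2; /* as if the control byte selected 2-byte maps */

		for (int tid = 0; tid < NUM_TIDS; tid++) {
			uint16_t map = 0;

			if (buf[0] & (1 << tid)) {
				map = get_map(map_size, pos);
				pos += map_size;
			}
			printf("TID %d -> links 0x%x\n", tid, (unsigned int)map);
		}
		return 0;
	}
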
+
+void ieee80211_process_neg_ttlm_req(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_mgmt *mgmt, size_t len)
+{
+ u8 dialog_token, direction[IEEE80211_TTLM_MAX_CNT] = {}, i;
+ size_t ies_len;
+ enum ieee80211_neg_ttlm_res ttlm_res = NEG_TTLM_RES_ACCEPT;
+ struct ieee802_11_elems *elems = NULL;
+ struct ieee80211_neg_ttlm neg_ttlm = {};
+
+ BUILD_BUG_ON(ARRAY_SIZE(direction) != ARRAY_SIZE(elems->ttlm));
+
+ if (!ieee80211_vif_is_mld(&sdata->vif))
+ return;
+
+ dialog_token = mgmt->u.action.u.ttlm_req.dialog_token;
+ ies_len = len - offsetof(struct ieee80211_mgmt,
+ u.action.u.ttlm_req.variable);
+ elems = ieee802_11_parse_elems(mgmt->u.action.u.ttlm_req.variable,
+ ies_len, true, NULL);
+ if (!elems) {
+ ttlm_res = NEG_TTLM_RES_REJECT;
+ goto out;
+ }
+
+ for (i = 0; i < elems->ttlm_num; i++) {
+ if (ieee80211_parse_neg_ttlm(sdata, elems->ttlm[i],
+ &neg_ttlm, &direction[i]) ||
+ (direction[i] == IEEE80211_TTLM_DIRECTION_BOTH &&
+ elems->ttlm_num != 1)) {
+ ttlm_res = NEG_TTLM_RES_REJECT;
+ goto out;
+ }
+ }
+
+ if (!elems->ttlm_num ||
+ (elems->ttlm_num == 2 && direction[0] == direction[1])) {
+ ttlm_res = NEG_TTLM_RES_REJECT;
+ goto out;
+ }
+
+ for (i = 0; i < IEEE80211_TTLM_NUM_TIDS; i++) {
+ if ((neg_ttlm.downlink[i] &&
+ (neg_ttlm.downlink[i] & ~sdata->vif.valid_links)) ||
+ (neg_ttlm.uplink[i] &&
+ (neg_ttlm.uplink[i] & ~sdata->vif.valid_links))) {
+ ttlm_res = NEG_TTLM_RES_REJECT;
+ goto out;
+ }
+ }
+
+ ttlm_res = drv_can_neg_ttlm(sdata->local, sdata, &neg_ttlm);
+
+ if (ttlm_res != NEG_TTLM_RES_ACCEPT)
+ goto out;
+
+ ieee80211_apply_neg_ttlm(sdata, neg_ttlm);
+out:
+ kfree(elems);
+ ieee80211_send_neg_ttlm_res(sdata, ttlm_res, dialog_token, &neg_ttlm);
+}
+
+void ieee80211_process_neg_ttlm_res(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_mgmt *mgmt, size_t len)
+{
+ if (!ieee80211_vif_is_mld(&sdata->vif) ||
+ mgmt->u.action.u.ttlm_req.dialog_token !=
+ sdata->u.mgd.dialog_token_alloc)
+ return;
+
+ wiphy_delayed_work_cancel(sdata->local->hw.wiphy,
+ &sdata->u.mgd.neg_ttlm_timeout_work);
+
+	/* An MLD station sends a TID-to-link mapping request mainly to
+	 * handle a BTM (BSS transition management) request, in which case
+	 * it needs to restrict the set of active links.
+	 * In that case the MLD AP is not expected to reject the negotiated
+	 * TTLM request. Handling of request rejections can be implemented
+	 * better in the future.
+	 */
+ if (mgmt->u.action.u.ttlm_res.status_code != WLAN_STATUS_SUCCESS)
+ __ieee80211_disconnect(sdata);
+}
+
void ieee80211_sta_rx_queued_ext(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb)
{
@@ -7064,6 +7413,8 @@ void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata)
ieee80211_sta_handle_tspec_ac_params_wk);
wiphy_delayed_work_init(&ifmgd->ttlm_work,
ieee80211_tid_to_link_map_work);
+ wiphy_delayed_work_init(&ifmgd->neg_ttlm_timeout_work,
+ ieee80211_neg_ttlm_timeout_work);
ifmgd->flags = 0;
ifmgd->powersave = sdata->wdev.ps;
@@ -7073,6 +7424,7 @@ void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata)
spin_lock_init(&ifmgd->teardown_lock);
ifmgd->teardown_skb = NULL;
ifmgd->orig_teardown_skb = NULL;
+ ifmgd->mcast_seq_last = IEEE80211_SN_MODULO;
}
static void ieee80211_recalc_smps_work(struct wiphy *wiphy,
@@ -7092,7 +7444,6 @@ void ieee80211_mgd_setup_link(struct ieee80211_link_data *link)
unsigned int link_id = link->link_id;
link->u.mgd.p2p_noa_index = -1;
- link->u.mgd.conn_flags = 0;
link->conf->bssid = link->u.mgd.bssid;
link->smps_mode = IEEE80211_SMPS_OFF;
@@ -7132,6 +7483,7 @@ void ieee80211_mlme_notify_scan_completed(struct ieee80211_local *local)
static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
struct cfg80211_bss *cbss, s8 link_id,
const u8 *ap_mld_addr, bool assoc,
+ struct ieee80211_conn_settings *conn,
bool override)
{
struct ieee80211_local *local = sdata->local;
@@ -7263,13 +7615,22 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
}
if (new_sta || override) {
- err = ieee80211_prep_channel(sdata, link, cbss, mlo,
- &link->u.mgd.conn_flags);
+ /*
+		 * Only set this if we're also going to calculate the AP
+		 * settings etc., otherwise it was already set by a
+		 * previous call. Note override is set to %true in assoc
+ * if the settings were changed.
+ */
+ link->u.mgd.conn = *conn;
+ err = ieee80211_prep_channel(sdata, link, link->link_id, cbss,
+ mlo, &link->u.mgd.conn);
if (err) {
if (new_sta)
sta_info_free(local, new_sta);
goto out_err;
}
+ /* pass out for use in assoc */
+ *conn = link->u.mgd.conn;
}
if (new_sta) {
@@ -7384,10 +7745,13 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
struct ieee80211_mgd_auth_data *auth_data;
+ struct ieee80211_conn_settings conn;
struct ieee80211_link_data *link;
+ struct ieee80211_supported_band *sband;
+ struct ieee80211_bss *bss;
u16 auth_alg;
int err;
- bool cont_auth;
+ bool cont_auth, wmm_used;
lockdep_assert_wiphy(sdata->local->hw.wiphy);
@@ -7518,15 +7882,24 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
/* needed for transmitting the auth frame(s) properly */
memcpy(sdata->vif.cfg.ap_addr, auth_data->ap_addr, ETH_ALEN);
+ bss = (void *)req->bss->priv;
+ wmm_used = bss->wmm_used && (local->hw.queues >= IEEE80211_NUM_ACS);
+
+ sband = local->hw.wiphy->bands[req->bss->channel->band];
+
+ ieee80211_determine_our_sta_mode_auth(sdata, sband, req, wmm_used,
+ &conn);
+
err = ieee80211_prep_connection(sdata, req->bss, req->link_id,
- req->ap_mld_addr, cont_auth, false);
+ req->ap_mld_addr, cont_auth,
+ &conn, false);
if (err)
goto err_clear;
- if (req->link_id > 0)
+ if (req->link_id >= 0)
link = sdata_dereference(sdata->link[req->link_id], sdata);
else
- link = sdata_dereference(sdata->link[0], sdata);
+ link = &sdata->deflink;
if (WARN_ON(!link)) {
err = -ENOLINK;
@@ -7558,38 +7931,33 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
return err;
}
-static ieee80211_conn_flags_t
+static void
ieee80211_setup_assoc_link(struct ieee80211_sub_if_data *sdata,
struct ieee80211_mgd_assoc_data *assoc_data,
struct cfg80211_assoc_request *req,
- ieee80211_conn_flags_t conn_flags,
+ struct ieee80211_conn_settings *conn,
unsigned int link_id)
{
struct ieee80211_local *local = sdata->local;
const struct cfg80211_bss_ies *bss_ies;
struct ieee80211_supported_band *sband;
- const struct element *ht_elem, *vht_elem;
struct ieee80211_link_data *link;
struct cfg80211_bss *cbss;
struct ieee80211_bss *bss;
- bool is_5ghz, is_6ghz;
cbss = assoc_data->link[link_id].bss;
if (WARN_ON(!cbss))
- return 0;
+ return;
bss = (void *)cbss->priv;
sband = local->hw.wiphy->bands[cbss->channel->band];
if (WARN_ON(!sband))
- return 0;
+ return;
link = sdata_dereference(sdata->link[link_id], sdata);
if (WARN_ON(!link))
- return 0;
-
- is_5ghz = cbss->channel->band == NL80211_BAND_5GHZ;
- is_6ghz = cbss->channel->band == NL80211_BAND_6GHZ;
+ return;
/* for MLO connections assume advertising all rates is OK */
if (!req->ap_mld_addr) {
@@ -7606,40 +7974,18 @@ ieee80211_setup_assoc_link(struct ieee80211_sub_if_data *sdata,
assoc_data->ie_pos += req->links[link_id].elems_len;
}
- rcu_read_lock();
- ht_elem = ieee80211_bss_get_elem(cbss, WLAN_EID_HT_OPERATION);
- if (ht_elem && ht_elem->datalen >= sizeof(struct ieee80211_ht_operation))
- assoc_data->link[link_id].ap_ht_param =
- ((struct ieee80211_ht_operation *)(ht_elem->data))->ht_param;
- else if (!is_6ghz)
- conn_flags |= IEEE80211_CONN_DISABLE_HT;
- vht_elem = ieee80211_bss_get_elem(cbss, WLAN_EID_VHT_CAPABILITY);
- if (vht_elem && vht_elem->datalen >= sizeof(struct ieee80211_vht_cap)) {
- memcpy(&assoc_data->link[link_id].ap_vht_cap, vht_elem->data,
- sizeof(struct ieee80211_vht_cap));
- } else if (is_5ghz) {
- link_info(link,
- "VHT capa missing/short, disabling VHT/HE/EHT\n");
- conn_flags |= IEEE80211_CONN_DISABLE_VHT |
- IEEE80211_CONN_DISABLE_HE |
- IEEE80211_CONN_DISABLE_EHT;
- }
- rcu_read_unlock();
-
link->u.mgd.beacon_crc_valid = false;
link->u.mgd.dtim_period = 0;
link->u.mgd.have_beacon = false;
- /* override HT/VHT configuration only if the AP and we support it */
- if (!(conn_flags & IEEE80211_CONN_DISABLE_HT)) {
+ /* override HT configuration only if the AP and we support it */
+ if (conn->mode >= IEEE80211_CONN_MODE_HT) {
struct ieee80211_sta_ht_cap sta_ht_cap;
memcpy(&sta_ht_cap, &sband->ht_cap, sizeof(sta_ht_cap));
ieee80211_apply_htcap_overrides(sdata, &sta_ht_cap);
}
- link->conf->eht_puncturing = 0;
-
rcu_read_lock();
bss_ies = rcu_dereference(cbss->beacon_ies);
if (bss_ies) {
@@ -7660,7 +8006,6 @@ ieee80211_setup_assoc_link(struct ieee80211_sub_if_data *sdata,
}
if (bss_ies) {
- const struct ieee80211_eht_operation *eht_oper;
const struct element *elem;
elem = cfg80211_find_ext_elem(WLAN_EID_EXT_MULTIPLE_BSSID_CONFIGURATION,
@@ -7677,32 +8022,6 @@ ieee80211_setup_assoc_link(struct ieee80211_sub_if_data *sdata,
link->conf->ema_ap = true;
else
link->conf->ema_ap = false;
-
- elem = cfg80211_find_ext_elem(WLAN_EID_EXT_EHT_OPERATION,
- bss_ies->data, bss_ies->len);
- eht_oper = (const void *)(elem->data + 1);
-
- if (elem &&
- ieee80211_eht_oper_size_ok((const void *)(elem->data + 1),
- elem->datalen - 1) &&
- (eht_oper->params & IEEE80211_EHT_OPER_INFO_PRESENT) &&
- (eht_oper->params & IEEE80211_EHT_OPER_DISABLED_SUBCHANNEL_BITMAP_PRESENT)) {
- const struct ieee80211_eht_operation_info *info =
- (void *)eht_oper->optional;
- const u8 *disable_subchannel_bitmap = info->optional;
- u16 bitmap;
-
- bitmap = get_unaligned_le16(disable_subchannel_bitmap);
- if (cfg80211_valid_disable_subchannel_bitmap(&bitmap,
- &link->conf->chandef) &&
- !(bitmap && ieee80211_hw_check(&local->hw, DISALLOW_PUNCTURING)))
- ieee80211_handle_puncturing_bitmap(link,
- eht_oper,
- bitmap,
- NULL);
- else
- conn_flags |= IEEE80211_CONN_DISABLE_EHT;
- }
}
rcu_read_unlock();
@@ -7729,8 +8048,67 @@ ieee80211_setup_assoc_link(struct ieee80211_sub_if_data *sdata,
} else {
link->smps_mode = link->u.mgd.req_smps;
}
+}
+
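
The struct ieee80211_conn_settings threaded through here replaces the old IEEE80211_CONN_DISABLE_* flag mask with a totally ordered mode, so "at least HT" becomes one comparison instead of several negated flag tests. A rough sketch of the idea; the enumerator set is illustrative (the real enum also covers S1G and legacy modes):

        enum conn_mode_sketch {         /* ordered: later implies earlier */
                CONN_MODE_LEGACY,
                CONN_MODE_HT,
                CONN_MODE_VHT,
                CONN_MODE_HE,
                CONN_MODE_EHT,
        };

        /* replaces !(flags & DISABLE_HT), !(flags & DISABLE_VHT), ... */
        static bool at_least(enum conn_mode_sketch mode,
                             enum conn_mode_sketch min)
        {
                return mode >= min;
        }
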
+static int
+ieee80211_mgd_get_ap_ht_vht_capa(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_mgd_assoc_data *assoc_data,
+ int link_id)
+{
+ struct cfg80211_bss *cbss = assoc_data->link[link_id].bss;
+ enum nl80211_band band = cbss->channel->band;
+ struct ieee80211_supported_band *sband;
+ const struct element *elem;
+ int err;
+
+ /* neither HT nor VHT elements are used on 6 GHz */
+ if (band == NL80211_BAND_6GHZ)
+ return 0;
+
+ if (assoc_data->link[link_id].conn.mode < IEEE80211_CONN_MODE_HT)
+ return 0;
- return conn_flags;
+ rcu_read_lock();
+ elem = ieee80211_bss_get_elem(cbss, WLAN_EID_HT_OPERATION);
+ if (!elem || elem->datalen < sizeof(struct ieee80211_ht_operation)) {
+ mlme_link_id_dbg(sdata, link_id, "no HT operation on BSS %pM\n",
+ cbss->bssid);
+ err = -EINVAL;
+ goto out_rcu;
+ }
+ assoc_data->link[link_id].ap_ht_param =
+ ((struct ieee80211_ht_operation *)(elem->data))->ht_param;
+ rcu_read_unlock();
+
+ if (assoc_data->link[link_id].conn.mode < IEEE80211_CONN_MODE_VHT)
+ return 0;
+
+ /* some drivers want to support VHT even on 2.4 GHz */
+ sband = sdata->local->hw.wiphy->bands[band];
+ if (!sband->vht_cap.vht_supported)
+ return 0;
+
+ rcu_read_lock();
+ elem = ieee80211_bss_get_elem(cbss, WLAN_EID_VHT_CAPABILITY);
+ /* but even then, accept the element being absent on the AP */
+ if (!elem && band == NL80211_BAND_2GHZ) {
+ err = 0;
+ goto out_rcu;
+ }
+ if (!elem || elem->datalen < sizeof(struct ieee80211_vht_cap)) {
+ mlme_link_id_dbg(sdata, link_id, "no VHT capa on BSS %pM\n",
+ cbss->bssid);
+ err = -EINVAL;
+ goto out_rcu;
+ }
+ memcpy(&assoc_data->link[link_id].ap_vht_cap, elem->data,
+ sizeof(struct ieee80211_vht_cap));
+ rcu_read_unlock();
+
+ return 0;
+out_rcu:
+ rcu_read_unlock();
+ return err;
}
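
The helper above reads scan-result elements under rcu_read_lock() because cfg80211 may replace a BSS entry's IEs concurrently; pointers from ieee80211_bss_get_elem() are only valid inside the read-side section, which is why ap_vht_cap is copied out before unlocking. A minimal sketch of that copy-out pattern (the 12-byte buffer stands in for struct ieee80211_vht_cap):

        static int copy_vht_cap(struct cfg80211_bss *cbss, u8 *out /* 12 bytes */)
        {
                const struct element *elem;
                int err = -EINVAL;

                rcu_read_lock();
                elem = ieee80211_bss_get_elem(cbss, WLAN_EID_VHT_CAPABILITY);
                if (elem && elem->datalen >= 12) {
                        memcpy(out, elem->data, 12); /* usable after unlock */
                        err = 0;
                }
                rcu_read_unlock();
                return err;
        }
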
int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
@@ -7742,11 +8120,10 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
struct ieee80211_mgd_assoc_data *assoc_data;
const struct element *ssid_elem;
struct ieee80211_vif_cfg *vif_cfg = &sdata->vif.cfg;
- ieee80211_conn_flags_t conn_flags = 0;
struct ieee80211_link_data *link;
struct cfg80211_bss *cbss;
- struct ieee80211_bss *bss;
- bool override;
+ bool override, uapsd_supported;
+ bool match_auth;
int i, err;
size_t size = sizeof(*assoc_data) + req->ie_len;
@@ -7765,44 +8142,26 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
if (ieee80211_mgd_csa_in_process(sdata, cbss)) {
sdata_info(sdata, "AP is in CSA process, reject assoc\n");
- kfree(assoc_data);
- return -EINVAL;
+ err = -EINVAL;
+ goto err_free;
}
rcu_read_lock();
ssid_elem = ieee80211_bss_get_elem(cbss, WLAN_EID_SSID);
if (!ssid_elem || ssid_elem->datalen > sizeof(assoc_data->ssid)) {
rcu_read_unlock();
- kfree(assoc_data);
- return -EINVAL;
+ err = -EINVAL;
+ goto err_free;
}
memcpy(assoc_data->ssid, ssid_elem->data, ssid_elem->datalen);
assoc_data->ssid_len = ssid_elem->datalen;
- memcpy(vif_cfg->ssid, assoc_data->ssid, assoc_data->ssid_len);
- vif_cfg->ssid_len = assoc_data->ssid_len;
rcu_read_unlock();
- if (req->ap_mld_addr) {
- for (i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) {
- if (!req->links[i].bss)
- continue;
- link = sdata_dereference(sdata->link[i], sdata);
- if (link)
- ether_addr_copy(assoc_data->link[i].addr,
- link->conf->addr);
- else
- eth_random_addr(assoc_data->link[i].addr);
- }
- } else {
- memcpy(assoc_data->link[0].addr, sdata->vif.addr, ETH_ALEN);
- }
-
- assoc_data->s1g = cbss->channel->band == NL80211_BAND_S1GHZ;
-
- memcpy(assoc_data->ap_addr,
- req->ap_mld_addr ?: req->bss->bssid,
- ETH_ALEN);
+ if (req->ap_mld_addr)
+ memcpy(assoc_data->ap_addr, req->ap_mld_addr, ETH_ALEN);
+ else
+ memcpy(assoc_data->ap_addr, cbss->bssid, ETH_ALEN);
if (ifmgd->associated) {
u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
@@ -7820,98 +8179,146 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
false);
}
- if (ifmgd->auth_data && !ifmgd->auth_data->done) {
- err = -EBUSY;
- goto err_free;
- }
+ memcpy(&ifmgd->ht_capa, &req->ht_capa, sizeof(ifmgd->ht_capa));
+ memcpy(&ifmgd->ht_capa_mask, &req->ht_capa_mask,
+ sizeof(ifmgd->ht_capa_mask));
- if (ifmgd->assoc_data) {
- err = -EBUSY;
- goto err_free;
- }
+ memcpy(&ifmgd->vht_capa, &req->vht_capa, sizeof(ifmgd->vht_capa));
+ memcpy(&ifmgd->vht_capa_mask, &req->vht_capa_mask,
+ sizeof(ifmgd->vht_capa_mask));
- if (ifmgd->auth_data) {
- bool match;
+ memcpy(&ifmgd->s1g_capa, &req->s1g_capa, sizeof(ifmgd->s1g_capa));
+ memcpy(&ifmgd->s1g_capa_mask, &req->s1g_capa_mask,
+ sizeof(ifmgd->s1g_capa_mask));
- /* keep sta info, bssid if matching */
- match = ether_addr_equal(ifmgd->auth_data->ap_addr,
- assoc_data->ap_addr) &&
- ifmgd->auth_data->link_id == req->link_id;
+ /* keep some setup (AP STA, channel, ...) if matching */
+ match_auth = ifmgd->auth_data &&
+ ether_addr_equal(ifmgd->auth_data->ap_addr,
+ assoc_data->ap_addr) &&
+ ifmgd->auth_data->link_id == req->link_id;
- /* Cleanup is delayed if auth_data matches */
- if (!match)
- ieee80211_destroy_auth_data(sdata, false);
- }
+ if (req->ap_mld_addr) {
+ uapsd_supported = true;
- /* prepare assoc data */
+ for (i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) {
+ struct ieee80211_supported_band *sband;
+ struct cfg80211_bss *link_cbss = req->links[i].bss;
+ struct ieee80211_bss *bss;
- bss = (void *)cbss->priv;
- assoc_data->wmm = bss->wmm_used &&
- (local->hw.queues >= IEEE80211_NUM_ACS);
+ if (!link_cbss)
+ continue;
- /*
- * IEEE802.11n does not allow TKIP/WEP as pairwise ciphers in HT mode.
- * We still associate in non-HT mode (11a/b/g) if any one of these
- * ciphers is configured as pairwise.
- * We can set this to true for non-11n hardware, that'll be checked
- * separately along with the peer capabilities.
- */
- for (i = 0; i < req->crypto.n_ciphers_pairwise; i++) {
- if (req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP40 ||
- req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_TKIP ||
- req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP104) {
- conn_flags |= IEEE80211_CONN_DISABLE_HT;
- conn_flags |= IEEE80211_CONN_DISABLE_VHT;
- conn_flags |= IEEE80211_CONN_DISABLE_HE;
- conn_flags |= IEEE80211_CONN_DISABLE_EHT;
- netdev_info(sdata->dev,
- "disabling HT/VHT/HE due to WEP/TKIP use\n");
+ bss = (void *)link_cbss->priv;
+
+ if (!bss->wmm_used) {
+ err = -EINVAL;
+ goto err_free;
+ }
+
+ if (req->flags & (ASSOC_REQ_DISABLE_HT |
+ ASSOC_REQ_DISABLE_VHT |
+ ASSOC_REQ_DISABLE_HE |
+ ASSOC_REQ_DISABLE_EHT)) {
+ err = -EINVAL;
+ goto err_free;
+ }
+
+ if (link_cbss->channel->band == NL80211_BAND_S1GHZ) {
+ err = -EINVAL;
+ goto err_free;
+ }
+
+ link = sdata_dereference(sdata->link[i], sdata);
+ if (link)
+ ether_addr_copy(assoc_data->link[i].addr,
+ link->conf->addr);
+ else
+ eth_random_addr(assoc_data->link[i].addr);
+ sband = local->hw.wiphy->bands[link_cbss->channel->band];
+
+ if (match_auth && i == assoc_link_id && link)
+ assoc_data->link[i].conn = link->u.mgd.conn;
+ else
+ assoc_data->link[i].conn =
+ ieee80211_conn_settings_unlimited;
+ ieee80211_determine_our_sta_mode_assoc(sdata, sband,
+ req, true, i,
+ &assoc_data->link[i].conn);
+ assoc_data->link[i].bss = link_cbss;
+ assoc_data->link[i].disabled = req->links[i].disabled;
+
+ if (!bss->uapsd_supported)
+ uapsd_supported = false;
+
+ if (assoc_data->link[i].conn.mode < IEEE80211_CONN_MODE_EHT) {
+ err = -EINVAL;
+ req->links[i].error = err;
+ goto err_free;
+ }
+
+ err = ieee80211_mgd_get_ap_ht_vht_capa(sdata,
+ assoc_data, i);
+ if (err) {
+ err = -EINVAL;
+ req->links[i].error = err;
+ goto err_free;
+ }
}
- }
- /* also disable HT/VHT/HE/EHT if the AP doesn't use WMM */
- if (!bss->wmm_used) {
- conn_flags |= IEEE80211_CONN_DISABLE_HT;
- conn_flags |= IEEE80211_CONN_DISABLE_VHT;
- conn_flags |= IEEE80211_CONN_DISABLE_HE;
- conn_flags |= IEEE80211_CONN_DISABLE_EHT;
- netdev_info(sdata->dev,
- "disabling HT/VHT/HE as WMM/QoS is not supported by the AP\n");
- }
+ assoc_data->wmm = true;
+ } else {
+ struct ieee80211_supported_band *sband;
+ struct ieee80211_bss *bss = (void *)cbss->priv;
- if (req->flags & ASSOC_REQ_DISABLE_HT) {
- mlme_dbg(sdata, "HT disabled by flag, disabling HT/VHT/HE\n");
- conn_flags |= IEEE80211_CONN_DISABLE_HT;
- conn_flags |= IEEE80211_CONN_DISABLE_VHT;
- conn_flags |= IEEE80211_CONN_DISABLE_HE;
- conn_flags |= IEEE80211_CONN_DISABLE_EHT;
- }
+ memcpy(assoc_data->link[0].addr, sdata->vif.addr, ETH_ALEN);
+ assoc_data->s1g = cbss->channel->band == NL80211_BAND_S1GHZ;
- if (req->flags & ASSOC_REQ_DISABLE_VHT) {
- mlme_dbg(sdata, "VHT disabled by flag, disabling VHT\n");
- conn_flags |= IEEE80211_CONN_DISABLE_VHT;
- }
+ assoc_data->wmm = bss->wmm_used &&
+ (local->hw.queues >= IEEE80211_NUM_ACS);
- if (req->flags & ASSOC_REQ_DISABLE_HE) {
- mlme_dbg(sdata, "HE disabled by flag, disabling HE/EHT\n");
- conn_flags |= IEEE80211_CONN_DISABLE_HE;
- conn_flags |= IEEE80211_CONN_DISABLE_EHT;
+ if (cbss->channel->band == NL80211_BAND_6GHZ &&
+ req->flags & (ASSOC_REQ_DISABLE_HT |
+ ASSOC_REQ_DISABLE_VHT |
+ ASSOC_REQ_DISABLE_HE)) {
+ err = -EINVAL;
+ goto err_free;
+ }
+
+ sband = local->hw.wiphy->bands[cbss->channel->band];
+
+ assoc_data->link[0].bss = cbss;
+
+ if (match_auth)
+ assoc_data->link[0].conn = sdata->deflink.u.mgd.conn;
+ else
+ assoc_data->link[0].conn =
+ ieee80211_conn_settings_unlimited;
+ ieee80211_determine_our_sta_mode_assoc(sdata, sband, req,
+ assoc_data->wmm, 0,
+ &assoc_data->link[0].conn);
+
+ uapsd_supported = bss->uapsd_supported;
+
+ err = ieee80211_mgd_get_ap_ht_vht_capa(sdata, assoc_data, 0);
+ if (err)
+ goto err_free;
}
- if (req->flags & ASSOC_REQ_DISABLE_EHT)
- conn_flags |= IEEE80211_CONN_DISABLE_EHT;
+ assoc_data->spp_amsdu = req->flags & ASSOC_REQ_SPP_AMSDU;
- memcpy(&ifmgd->ht_capa, &req->ht_capa, sizeof(ifmgd->ht_capa));
- memcpy(&ifmgd->ht_capa_mask, &req->ht_capa_mask,
- sizeof(ifmgd->ht_capa_mask));
+ if (ifmgd->auth_data && !ifmgd->auth_data->done) {
+ err = -EBUSY;
+ goto err_free;
+ }
- memcpy(&ifmgd->vht_capa, &req->vht_capa, sizeof(ifmgd->vht_capa));
- memcpy(&ifmgd->vht_capa_mask, &req->vht_capa_mask,
- sizeof(ifmgd->vht_capa_mask));
+ if (ifmgd->assoc_data) {
+ err = -EBUSY;
+ goto err_free;
+ }
- memcpy(&ifmgd->s1g_capa, &req->s1g_capa, sizeof(ifmgd->s1g_capa));
- memcpy(&ifmgd->s1g_capa_mask, &req->s1g_capa_mask,
- sizeof(ifmgd->s1g_capa_mask));
+ /* Cleanup is delayed if auth_data matches */
+ if (ifmgd->auth_data && !match_auth)
+ ieee80211_destroy_auth_data(sdata, false);
if (req->ie && req->ie_len) {
memcpy(assoc_data->ie, req->ie, req->ie_len);
@@ -7943,19 +8350,10 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
assoc_data->assoc_link_id = assoc_link_id;
if (req->ap_mld_addr) {
- for (i = 0; i < ARRAY_SIZE(assoc_data->link); i++) {
- assoc_data->link[i].conn_flags = conn_flags;
- assoc_data->link[i].bss = req->links[i].bss;
- assoc_data->link[i].disabled = req->links[i].disabled;
- }
-
/* if there was no authentication, set up the link */
err = ieee80211_vif_set_links(sdata, BIT(assoc_link_id), 0);
if (err)
goto err_clear;
- } else {
- assoc_data->link[0].conn_flags = conn_flags;
- assoc_data->link[0].bss = cbss;
}
link = sdata_dereference(sdata->link[assoc_link_id], sdata);
@@ -7964,19 +8362,21 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
goto err_clear;
}
- /* keep old conn_flags from ieee80211_prep_channel() from auth */
- conn_flags |= link->u.mgd.conn_flags;
- conn_flags |= ieee80211_setup_assoc_link(sdata, assoc_data, req,
- conn_flags, assoc_link_id);
- override = link->u.mgd.conn_flags != conn_flags;
- link->u.mgd.conn_flags |= conn_flags;
+ override = link->u.mgd.conn.mode !=
+ assoc_data->link[assoc_link_id].conn.mode ||
+ link->u.mgd.conn.bw_limit !=
+ assoc_data->link[assoc_link_id].conn.bw_limit;
+ link->u.mgd.conn = assoc_data->link[assoc_link_id].conn;
+
+ ieee80211_setup_assoc_link(sdata, assoc_data, req, &link->u.mgd.conn,
+ assoc_link_id);
if (WARN((sdata->vif.driver_flags & IEEE80211_VIF_SUPPORTS_UAPSD) &&
ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK),
"U-APSD not supported with HW_PS_NULLFUNC_STACK\n"))
sdata->vif.driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD;
- if (bss->wmm_used && bss->uapsd_supported &&
+ if (assoc_data->wmm && uapsd_supported &&
(sdata->vif.driver_flags & IEEE80211_VIF_SUPPORTS_UAPSD)) {
assoc_data->uapsd = true;
ifmgd->flags |= IEEE80211_STA_UAPSD_ENABLED;
@@ -8020,27 +8420,29 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
continue;
if (i == assoc_data->assoc_link_id)
continue;
- /* only calculate the flags, hence link == NULL */
- err = ieee80211_prep_channel(sdata, NULL,
+ /* only calculate the mode, hence link == NULL */
+ err = ieee80211_prep_channel(sdata, NULL, i,
assoc_data->link[i].bss, true,
- &assoc_data->link[i].conn_flags);
+ &assoc_data->link[i].conn);
if (err) {
req->links[i].error = err;
goto err_clear;
}
}
+ memcpy(vif_cfg->ssid, assoc_data->ssid, assoc_data->ssid_len);
+ vif_cfg->ssid_len = assoc_data->ssid_len;
+
/* needed for transmitting the assoc frames properly */
memcpy(sdata->vif.cfg.ap_addr, assoc_data->ap_addr, ETH_ALEN);
err = ieee80211_prep_connection(sdata, cbss, req->link_id,
- req->ap_mld_addr, true, override);
+ req->ap_mld_addr, true,
+ &assoc_data->link[assoc_link_id].conn,
+ override);
if (err)
goto err_clear;
- assoc_data->link[assoc_data->assoc_link_id].conn_flags =
- link->u.mgd.conn_flags;
-
if (ieee80211_hw_check(&sdata->local->hw, NEED_DTIM_BEFORE_ASSOC)) {
const struct cfg80211_bss_ies *beacon_ies;
@@ -8204,6 +8606,8 @@ void ieee80211_mgd_stop(struct ieee80211_sub_if_data *sdata)
wiphy_delayed_work_cancel(sdata->local->hw.wiphy,
&ifmgd->ml_reconf_work);
wiphy_delayed_work_cancel(sdata->local->hw.wiphy, &ifmgd->ttlm_work);
+ wiphy_delayed_work_cancel(sdata->local->hw.wiphy,
+ &ifmgd->neg_ttlm_timeout_work);
if (ifmgd->assoc_data)
ieee80211_destroy_assoc_data(sdata, ASSOC_TIMEOUT);
diff --git a/net/mac80211/ocb.c b/net/mac80211/ocb.c
index 449af4e1cca4..9ef14e475c90 100644
--- a/net/mac80211/ocb.c
+++ b/net/mac80211/ocb.c
@@ -168,6 +168,7 @@ void ieee80211_ocb_setup_sdata(struct ieee80211_sub_if_data *sdata)
int ieee80211_ocb_join(struct ieee80211_sub_if_data *sdata,
struct ocb_setup *setup)
{
+ struct ieee80211_chan_req chanreq = { .oper = setup->chandef };
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_ocb *ifocb = &sdata->u.ocb;
u64 changed = BSS_CHANGED_OCB | BSS_CHANGED_BSSID;
@@ -182,7 +183,7 @@ int ieee80211_ocb_join(struct ieee80211_sub_if_data *sdata,
sdata->deflink.smps_mode = IEEE80211_SMPS_OFF;
sdata->deflink.needed_rx_chains = sdata->local->rx_chains;
- err = ieee80211_link_use_channel(&sdata->deflink, &setup->chandef,
+ err = ieee80211_link_use_channel(&sdata->deflink, &chanreq,
IEEE80211_CHANCTX_SHARED);
if (err)
return err;
@@ -207,7 +208,7 @@ int ieee80211_ocb_leave(struct ieee80211_sub_if_data *sdata)
lockdep_assert_wiphy(sdata->local->hw.wiphy);
ifocb->joined = false;
- sta_info_flush(sdata);
+ sta_info_flush(sdata, -1);
spin_lock_bh(&ifocb->incomplete_lock);
while (!list_empty(&ifocb->incomplete_stations)) {
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
index 6c4080202573..221695d841fd 100644
--- a/net/mac80211/offchannel.c
+++ b/net/mac80211/offchannel.c
@@ -86,7 +86,7 @@ void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local)
lockdep_assert_wiphy(local->hw.wiphy);
- if (WARN_ON(local->use_chanctx))
+ if (WARN_ON(!local->emulate_chanctx))
return;
/*
@@ -136,7 +136,7 @@ void ieee80211_offchannel_return(struct ieee80211_local *local)
lockdep_assert_wiphy(local->hw.wiphy);
- if (WARN_ON(local->use_chanctx))
+ if (WARN_ON(!local->emulate_chanctx))
return;
list_for_each_entry(sdata, &local->interfaces, list) {
@@ -351,10 +351,13 @@ static void _ieee80211_start_next_roc(struct ieee80211_local *local)
* 20 MHz channel width) don't stop all the operations but still
* treat it as though the ROC operation started properly, so
* other ROC operations won't interfere with this one.
+ *
+ * Note: a scan can't be running here since tmp_channel is what we
+ * use, so this must be the currently active channel.
*/
- roc->on_channel = roc->chan == local->_oper_chandef.chan &&
- local->_oper_chandef.width != NL80211_CHAN_WIDTH_5 &&
- local->_oper_chandef.width != NL80211_CHAN_WIDTH_10;
+ roc->on_channel = roc->chan == local->hw.conf.chandef.chan &&
+ local->hw.conf.chandef.width != NL80211_CHAN_WIDTH_5 &&
+ local->hw.conf.chandef.width != NL80211_CHAN_WIDTH_10;
/* start this ROC */
ieee80211_recalc_idle(local);
@@ -363,7 +366,7 @@ static void _ieee80211_start_next_roc(struct ieee80211_local *local)
ieee80211_offchannel_stop_vifs(local);
local->tmp_channel = roc->chan;
- ieee80211_hw_config(local, 0);
+ ieee80211_hw_conf_chan(local);
}
wiphy_delayed_work_queue(local->hw.wiphy, &local->roc_work,
@@ -426,7 +429,7 @@ static void __ieee80211_roc_work(struct ieee80211_local *local)
return;
if (!roc->started) {
- WARN_ON(local->use_chanctx);
+ WARN_ON(!local->emulate_chanctx);
_ieee80211_start_next_roc(local);
} else {
on_channel = roc->on_channel;
@@ -439,7 +442,7 @@ static void __ieee80211_roc_work(struct ieee80211_local *local)
ieee80211_flush_queues(local, NULL, false);
local->tmp_channel = NULL;
- ieee80211_hw_config(local, 0);
+ ieee80211_hw_conf_chan(local);
ieee80211_offchannel_return(local);
}
@@ -539,7 +542,7 @@ static int ieee80211_start_roc_work(struct ieee80211_local *local,
/* this may work, but is untested */
return -EOPNOTSUPP;
- if (local->use_chanctx && !local->ops->remain_on_channel)
+ if (!local->emulate_chanctx && !local->ops->remain_on_channel)
return -EOPNOTSUPP;
roc = kzalloc(sizeof(*roc), GFP_KERNEL);
diff --git a/net/mac80211/parse.c b/net/mac80211/parse.c
new file mode 100644
index 000000000000..55e5497f8978
--- /dev/null
+++ b/net/mac80211/parse.c
@@ -0,0 +1,971 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2002-2005, Instant802 Networks, Inc.
+ * Copyright 2005-2006, Devicescape Software, Inc.
+ * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
+ * Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
+ * Copyright 2013-2014 Intel Mobile Communications GmbH
+ * Copyright (C) 2015-2017 Intel Deutschland GmbH
+ * Copyright (C) 2018-2024 Intel Corporation
+ *
+ * element parsing for mac80211
+ */
+
+#include <net/mac80211.h>
+#include <linux/netdevice.h>
+#include <linux/export.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/skbuff.h>
+#include <linux/etherdevice.h>
+#include <linux/if_arp.h>
+#include <linux/bitmap.h>
+#include <linux/crc32.h>
+#include <net/net_namespace.h>
+#include <net/cfg80211.h>
+#include <net/rtnetlink.h>
+#include <kunit/visibility.h>
+
+#include "ieee80211_i.h"
+#include "driver-ops.h"
+#include "rate.h"
+#include "mesh.h"
+#include "wme.h"
+#include "led.h"
+#include "wep.h"
+
+struct ieee80211_elems_parse {
+ /* must be first for kfree to work */
+ struct ieee802_11_elems elems;
+
+ /* The basic Multi-Link element in the original elements */
+ const struct element *ml_basic_elem;
+
+ /* The reconfiguration Multi-Link element in the original elements */
+ const struct element *ml_reconf_elem;
+
+ /*
+ * scratch buffer that can be used for various element parsing related
+ * tasks, e.g., element de-fragmentation
+ */
+ size_t scratch_len;
+ u8 *scratch_pos;
+ u8 scratch[] __counted_by(scratch_len);
+};
+
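
The flexible-array scratch space above is allocated in one go and then consumed like a bump allocator: scratch_pos marks the next free byte and is advanced after each de-fragmented copy. A sketch of the allocation side, assuming (as the code later does) that three times the IE length is always enough:

        struct parse_sketch {
                size_t scratch_len;
                u8 *scratch_pos;
                u8 scratch[] __counted_by(scratch_len);
        };

        static struct parse_sketch *parse_alloc(size_t ie_len)
        {
                size_t scratch_len = 3 * ie_len;
                struct parse_sketch *p;

                p = kzalloc(struct_size(p, scratch, scratch_len), GFP_ATOMIC);
                if (!p)
                        return NULL;
                p->scratch_len = scratch_len;
                p->scratch_pos = p->scratch;    /* next free byte */
                return p;
        }
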
+static void
+ieee80211_parse_extension_element(u32 *crc,
+ const struct element *elem,
+ struct ieee80211_elems_parse *elems_parse,
+ struct ieee80211_elems_parse_params *params)
+{
+ struct ieee802_11_elems *elems = &elems_parse->elems;
+ const void *data = elem->data + 1;
+ bool calc_crc = false;
+ u8 len;
+
+ if (!elem->datalen)
+ return;
+
+ len = elem->datalen - 1;
+
+ switch (elem->data[0]) {
+ case WLAN_EID_EXT_HE_MU_EDCA:
+ if (params->mode < IEEE80211_CONN_MODE_HE)
+ break;
+ calc_crc = true;
+ if (len >= sizeof(*elems->mu_edca_param_set))
+ elems->mu_edca_param_set = data;
+ break;
+ case WLAN_EID_EXT_HE_CAPABILITY:
+ if (params->mode < IEEE80211_CONN_MODE_HE)
+ break;
+ if (ieee80211_he_capa_size_ok(data, len)) {
+ elems->he_cap = data;
+ elems->he_cap_len = len;
+ }
+ break;
+ case WLAN_EID_EXT_HE_OPERATION:
+ if (params->mode < IEEE80211_CONN_MODE_HE)
+ break;
+ calc_crc = true;
+ if (len >= sizeof(*elems->he_operation) &&
+ len >= ieee80211_he_oper_size(data) - 1)
+ elems->he_operation = data;
+ break;
+ case WLAN_EID_EXT_UORA:
+ if (params->mode < IEEE80211_CONN_MODE_HE)
+ break;
+ if (len >= 1)
+ elems->uora_element = data;
+ break;
+ case WLAN_EID_EXT_MAX_CHANNEL_SWITCH_TIME:
+ if (len == 3)
+ elems->max_channel_switch_time = data;
+ break;
+ case WLAN_EID_EXT_MULTIPLE_BSSID_CONFIGURATION:
+ if (len >= sizeof(*elems->mbssid_config_ie))
+ elems->mbssid_config_ie = data;
+ break;
+ case WLAN_EID_EXT_HE_SPR:
+ if (params->mode < IEEE80211_CONN_MODE_HE)
+ break;
+ if (len >= sizeof(*elems->he_spr) &&
+ len >= ieee80211_he_spr_size(data))
+ elems->he_spr = data;
+ break;
+ case WLAN_EID_EXT_HE_6GHZ_CAPA:
+ if (params->mode < IEEE80211_CONN_MODE_HE)
+ break;
+ if (len >= sizeof(*elems->he_6ghz_capa))
+ elems->he_6ghz_capa = data;
+ break;
+ case WLAN_EID_EXT_EHT_CAPABILITY:
+ if (params->mode < IEEE80211_CONN_MODE_EHT)
+ break;
+ if (ieee80211_eht_capa_size_ok(elems->he_cap,
+ data, len,
+ params->from_ap)) {
+ elems->eht_cap = data;
+ elems->eht_cap_len = len;
+ }
+ break;
+ case WLAN_EID_EXT_EHT_OPERATION:
+ if (params->mode < IEEE80211_CONN_MODE_EHT)
+ break;
+ if (ieee80211_eht_oper_size_ok(data, len))
+ elems->eht_operation = data;
+ calc_crc = true;
+ break;
+ case WLAN_EID_EXT_EHT_MULTI_LINK:
+ if (params->mode < IEEE80211_CONN_MODE_EHT)
+ break;
+ calc_crc = true;
+
+ if (ieee80211_mle_size_ok(data, len)) {
+ const struct ieee80211_multi_link_elem *mle =
+ (void *)data;
+
+ switch (le16_get_bits(mle->control,
+ IEEE80211_ML_CONTROL_TYPE)) {
+ case IEEE80211_ML_CONTROL_TYPE_BASIC:
+ if (elems_parse->ml_basic_elem) {
+ elems->parse_error |=
+ IEEE80211_PARSE_ERR_DUP_NEST_ML_BASIC;
+ break;
+ }
+ elems_parse->ml_basic_elem = elem;
+ break;
+ case IEEE80211_ML_CONTROL_TYPE_RECONF:
+ elems_parse->ml_reconf_elem = elem;
+ break;
+ default:
+ break;
+ }
+ }
+ break;
+ case WLAN_EID_EXT_BANDWIDTH_INDICATION:
+ if (params->mode < IEEE80211_CONN_MODE_EHT)
+ break;
+ if (ieee80211_bandwidth_indication_size_ok(data, len))
+ elems->bandwidth_indication = data;
+ calc_crc = true;
+ break;
+ case WLAN_EID_EXT_TID_TO_LINK_MAPPING:
+ if (params->mode < IEEE80211_CONN_MODE_EHT)
+ break;
+ calc_crc = true;
+ if (ieee80211_tid_to_link_map_size_ok(data, len) &&
+ elems->ttlm_num < ARRAY_SIZE(elems->ttlm)) {
+ elems->ttlm[elems->ttlm_num] = (void *)data;
+ elems->ttlm_num++;
+ }
+ break;
+ }
+
+ if (crc && calc_crc)
+ *crc = crc32_be(*crc, (void *)elem, elem->datalen + 2);
+}
+
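
Extension elements share element ID 255 and carry the real identifier in their first data byte, which is why the handler above works on data = elem->data + 1 with len = elem->datalen - 1. Schematically (byte values shown for illustration):

        static const u8 ext_elem_example[] = {
                255,    /* WLAN_EID_EXTENSION */
                2,      /* datalen: extension ID plus one payload byte */
                35,     /* data[0]: WLAN_EID_EXT_HE_CAPABILITY */
                0x00,   /* data + 1: payload, datalen - 1 bytes */
        };
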
+static u32
+_ieee802_11_parse_elems_full(struct ieee80211_elems_parse_params *params,
+ struct ieee80211_elems_parse *elems_parse,
+ const struct element *check_inherit)
+{
+ struct ieee802_11_elems *elems = &elems_parse->elems;
+ const struct element *elem;
+ bool calc_crc = params->filter != 0;
+ DECLARE_BITMAP(seen_elems, 256);
+ u32 crc = params->crc;
+
+ bitmap_zero(seen_elems, 256);
+
+ for_each_element(elem, params->start, params->len) {
+ const struct element *subelem;
+ u8 elem_parse_failed;
+ u8 id = elem->id;
+ u8 elen = elem->datalen;
+ const u8 *pos = elem->data;
+
+ if (check_inherit &&
+ !cfg80211_is_element_inherited(elem,
+ check_inherit))
+ continue;
+
+ switch (id) {
+ case WLAN_EID_SSID:
+ case WLAN_EID_SUPP_RATES:
+ case WLAN_EID_FH_PARAMS:
+ case WLAN_EID_DS_PARAMS:
+ case WLAN_EID_CF_PARAMS:
+ case WLAN_EID_TIM:
+ case WLAN_EID_IBSS_PARAMS:
+ case WLAN_EID_CHALLENGE:
+ case WLAN_EID_RSN:
+ case WLAN_EID_ERP_INFO:
+ case WLAN_EID_EXT_SUPP_RATES:
+ case WLAN_EID_HT_CAPABILITY:
+ case WLAN_EID_HT_OPERATION:
+ case WLAN_EID_VHT_CAPABILITY:
+ case WLAN_EID_VHT_OPERATION:
+ case WLAN_EID_MESH_ID:
+ case WLAN_EID_MESH_CONFIG:
+ case WLAN_EID_PEER_MGMT:
+ case WLAN_EID_PREQ:
+ case WLAN_EID_PREP:
+ case WLAN_EID_PERR:
+ case WLAN_EID_RANN:
+ case WLAN_EID_CHANNEL_SWITCH:
+ case WLAN_EID_EXT_CHANSWITCH_ANN:
+ case WLAN_EID_COUNTRY:
+ case WLAN_EID_PWR_CONSTRAINT:
+ case WLAN_EID_TIMEOUT_INTERVAL:
+ case WLAN_EID_SECONDARY_CHANNEL_OFFSET:
+ case WLAN_EID_WIDE_BW_CHANNEL_SWITCH:
+ case WLAN_EID_CHAN_SWITCH_PARAM:
+ case WLAN_EID_EXT_CAPABILITY:
+ case WLAN_EID_CHAN_SWITCH_TIMING:
+ case WLAN_EID_LINK_ID:
+ case WLAN_EID_BSS_MAX_IDLE_PERIOD:
+ case WLAN_EID_RSNX:
+ case WLAN_EID_S1G_BCN_COMPAT:
+ case WLAN_EID_S1G_CAPABILITIES:
+ case WLAN_EID_S1G_OPERATION:
+ case WLAN_EID_AID_RESPONSE:
+ case WLAN_EID_S1G_SHORT_BCN_INTERVAL:
+ /*
+ * not listing WLAN_EID_CHANNEL_SWITCH_WRAPPER -- it seems possible
+ * that, if its content grows, it may legitimately appear more
+ * than once
+ */
+ if (test_bit(id, seen_elems)) {
+ elems->parse_error |=
+ IEEE80211_PARSE_ERR_DUP_ELEM;
+ continue;
+ }
+ break;
+ }
+
+ if (calc_crc && id < 64 && (params->filter & (1ULL << id)))
+ crc = crc32_be(crc, pos - 2, elen + 2);
+
+ elem_parse_failed = 0;
+
+ switch (id) {
+ case WLAN_EID_LINK_ID:
+ if (elen + 2 < sizeof(struct ieee80211_tdls_lnkie)) {
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ }
+ elems->lnk_id = (void *)(pos - 2);
+ break;
+ case WLAN_EID_CHAN_SWITCH_TIMING:
+ if (elen < sizeof(struct ieee80211_ch_switch_timing)) {
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ }
+ elems->ch_sw_timing = (void *)pos;
+ break;
+ case WLAN_EID_EXT_CAPABILITY:
+ elems->ext_capab = pos;
+ elems->ext_capab_len = elen;
+ break;
+ case WLAN_EID_SSID:
+ elems->ssid = pos;
+ elems->ssid_len = elen;
+ break;
+ case WLAN_EID_SUPP_RATES:
+ elems->supp_rates = pos;
+ elems->supp_rates_len = elen;
+ break;
+ case WLAN_EID_DS_PARAMS:
+ if (elen >= 1)
+ elems->ds_params = pos;
+ else
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ case WLAN_EID_TIM:
+ if (elen >= sizeof(struct ieee80211_tim_ie)) {
+ elems->tim = (void *)pos;
+ elems->tim_len = elen;
+ } else
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ case WLAN_EID_VENDOR_SPECIFIC:
+ if (elen >= 4 && pos[0] == 0x00 && pos[1] == 0x50 &&
+ pos[2] == 0xf2) {
+ /* Microsoft OUI (00:50:F2) */
+
+ if (calc_crc)
+ crc = crc32_be(crc, pos - 2, elen + 2);
+
+ if (elen >= 5 && pos[3] == 2) {
+ /* OUI Type 2 - WMM IE */
+ if (pos[4] == 0) {
+ elems->wmm_info = pos;
+ elems->wmm_info_len = elen;
+ } else if (pos[4] == 1) {
+ elems->wmm_param = pos;
+ elems->wmm_param_len = elen;
+ }
+ }
+ }
+ break;
+ case WLAN_EID_RSN:
+ elems->rsn = pos;
+ elems->rsn_len = elen;
+ break;
+ case WLAN_EID_ERP_INFO:
+ if (elen >= 1)
+ elems->erp_info = pos;
+ else
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ case WLAN_EID_EXT_SUPP_RATES:
+ elems->ext_supp_rates = pos;
+ elems->ext_supp_rates_len = elen;
+ break;
+ case WLAN_EID_HT_CAPABILITY:
+ if (params->mode < IEEE80211_CONN_MODE_HT)
+ break;
+ if (elen >= sizeof(struct ieee80211_ht_cap))
+ elems->ht_cap_elem = (void *)pos;
+ else
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ case WLAN_EID_HT_OPERATION:
+ if (params->mode < IEEE80211_CONN_MODE_HT)
+ break;
+ if (elen >= sizeof(struct ieee80211_ht_operation))
+ elems->ht_operation = (void *)pos;
+ else
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ case WLAN_EID_VHT_CAPABILITY:
+ if (params->mode < IEEE80211_CONN_MODE_VHT)
+ break;
+ if (elen >= sizeof(struct ieee80211_vht_cap))
+ elems->vht_cap_elem = (void *)pos;
+ else
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ case WLAN_EID_VHT_OPERATION:
+ if (params->mode < IEEE80211_CONN_MODE_VHT)
+ break;
+ if (elen >= sizeof(struct ieee80211_vht_operation)) {
+ elems->vht_operation = (void *)pos;
+ if (calc_crc)
+ crc = crc32_be(crc, pos - 2, elen + 2);
+ break;
+ }
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ case WLAN_EID_OPMODE_NOTIF:
+ if (params->mode < IEEE80211_CONN_MODE_VHT)
+ break;
+ if (elen > 0) {
+ elems->opmode_notif = pos;
+ if (calc_crc)
+ crc = crc32_be(crc, pos - 2, elen + 2);
+ break;
+ }
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ case WLAN_EID_MESH_ID:
+ elems->mesh_id = pos;
+ elems->mesh_id_len = elen;
+ break;
+ case WLAN_EID_MESH_CONFIG:
+ if (elen >= sizeof(struct ieee80211_meshconf_ie))
+ elems->mesh_config = (void *)pos;
+ else
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ case WLAN_EID_PEER_MGMT:
+ elems->peering = pos;
+ elems->peering_len = elen;
+ break;
+ case WLAN_EID_MESH_AWAKE_WINDOW:
+ if (elen >= 2)
+ elems->awake_window = (void *)pos;
+ break;
+ case WLAN_EID_PREQ:
+ elems->preq = pos;
+ elems->preq_len = elen;
+ break;
+ case WLAN_EID_PREP:
+ elems->prep = pos;
+ elems->prep_len = elen;
+ break;
+ case WLAN_EID_PERR:
+ elems->perr = pos;
+ elems->perr_len = elen;
+ break;
+ case WLAN_EID_RANN:
+ if (elen >= sizeof(struct ieee80211_rann_ie))
+ elems->rann = (void *)pos;
+ else
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ case WLAN_EID_CHANNEL_SWITCH:
+ if (elen != sizeof(struct ieee80211_channel_sw_ie)) {
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ }
+ elems->ch_switch_ie = (void *)pos;
+ break;
+ case WLAN_EID_EXT_CHANSWITCH_ANN:
+ if (elen != sizeof(struct ieee80211_ext_chansw_ie)) {
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ }
+ elems->ext_chansw_ie = (void *)pos;
+ break;
+ case WLAN_EID_SECONDARY_CHANNEL_OFFSET:
+ if (params->mode < IEEE80211_CONN_MODE_HT)
+ break;
+ if (elen != sizeof(struct ieee80211_sec_chan_offs_ie)) {
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ }
+ elems->sec_chan_offs = (void *)pos;
+ break;
+ case WLAN_EID_CHAN_SWITCH_PARAM:
+ if (elen <
+ sizeof(*elems->mesh_chansw_params_ie)) {
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ }
+ elems->mesh_chansw_params_ie = (void *)pos;
+ break;
+ case WLAN_EID_WIDE_BW_CHANNEL_SWITCH:
+ if (params->mode < IEEE80211_CONN_MODE_VHT)
+ break;
+
+ if (!params->action) {
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_UNEXPECTED_ELEM;
+ break;
+ }
+
+ if (elen < sizeof(*elems->wide_bw_chansw_ie)) {
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ }
+ elems->wide_bw_chansw_ie = (void *)pos;
+ break;
+ case WLAN_EID_CHANNEL_SWITCH_WRAPPER:
+ if (params->mode < IEEE80211_CONN_MODE_VHT)
+ break;
+ if (params->action) {
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_UNEXPECTED_ELEM;
+ break;
+ }
+ /*
+ * This is a bit tricky, but as we only care about
+ * a few elements, parse them out manually.
+ */
+ subelem = cfg80211_find_elem(WLAN_EID_WIDE_BW_CHANNEL_SWITCH,
+ pos, elen);
+ if (subelem) {
+ if (subelem->datalen >= sizeof(*elems->wide_bw_chansw_ie))
+ elems->wide_bw_chansw_ie =
+ (void *)subelem->data;
+ else
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ }
+
+ if (params->mode < IEEE80211_CONN_MODE_EHT)
+ break;
+
+ subelem = cfg80211_find_ext_elem(WLAN_EID_EXT_BANDWIDTH_INDICATION,
+ pos, elen);
+ if (subelem) {
+ const void *edata = subelem->data + 1;
+ u8 edatalen = subelem->datalen - 1;
+
+ if (ieee80211_bandwidth_indication_size_ok(edata,
+ edatalen))
+ elems->bandwidth_indication = edata;
+ else
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ }
+ break;
+ case WLAN_EID_COUNTRY:
+ elems->country_elem = pos;
+ elems->country_elem_len = elen;
+ break;
+ case WLAN_EID_PWR_CONSTRAINT:
+ if (elen != 1) {
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ }
+ elems->pwr_constr_elem = pos;
+ break;
+ case WLAN_EID_CISCO_VENDOR_SPECIFIC:
+ /* Lots of different options exist, but we only care
+ * about the Dynamic Transmit Power Control element.
+ * First check for the Cisco OUI, then for the DTPC
+ * tag (0x00).
+ */
+ if (elen < 4) {
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ }
+
+ if (pos[0] != 0x00 || pos[1] != 0x40 ||
+ pos[2] != 0x96 || pos[3] != 0x00)
+ break;
+
+ if (elen != 6) {
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ }
+
+ if (calc_crc)
+ crc = crc32_be(crc, pos - 2, elen + 2);
+
+ elems->cisco_dtpc_elem = pos;
+ break;
+ case WLAN_EID_ADDBA_EXT:
+ if (elen < sizeof(struct ieee80211_addba_ext_ie)) {
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ }
+ elems->addba_ext_ie = (void *)pos;
+ break;
+ case WLAN_EID_TIMEOUT_INTERVAL:
+ if (elen >= sizeof(struct ieee80211_timeout_interval_ie))
+ elems->timeout_int = (void *)pos;
+ else
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ case WLAN_EID_BSS_MAX_IDLE_PERIOD:
+ if (elen >= sizeof(*elems->max_idle_period_ie))
+ elems->max_idle_period_ie = (void *)pos;
+ break;
+ case WLAN_EID_RSNX:
+ elems->rsnx = pos;
+ elems->rsnx_len = elen;
+ break;
+ case WLAN_EID_TX_POWER_ENVELOPE:
+ if (elen < 1 ||
+ elen > sizeof(struct ieee80211_tx_pwr_env))
+ break;
+
+ if (elems->tx_pwr_env_num >= ARRAY_SIZE(elems->tx_pwr_env))
+ break;
+
+ elems->tx_pwr_env[elems->tx_pwr_env_num] = (void *)pos;
+ elems->tx_pwr_env_len[elems->tx_pwr_env_num] = elen;
+ elems->tx_pwr_env_num++;
+ break;
+ case WLAN_EID_EXTENSION:
+ ieee80211_parse_extension_element(calc_crc ?
+ &crc : NULL,
+ elem, elems_parse,
+ params);
+ break;
+ case WLAN_EID_S1G_CAPABILITIES:
+ if (params->mode != IEEE80211_CONN_MODE_S1G)
+ break;
+ if (elen >= sizeof(*elems->s1g_capab))
+ elems->s1g_capab = (void *)pos;
+ else
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ case WLAN_EID_S1G_OPERATION:
+ if (params->mode != IEEE80211_CONN_MODE_S1G)
+ break;
+ if (elen == sizeof(*elems->s1g_oper))
+ elems->s1g_oper = (void *)pos;
+ else
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ case WLAN_EID_S1G_BCN_COMPAT:
+ if (params->mode != IEEE80211_CONN_MODE_S1G)
+ break;
+ if (elen == sizeof(*elems->s1g_bcn_compat))
+ elems->s1g_bcn_compat = (void *)pos;
+ else
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ case WLAN_EID_AID_RESPONSE:
+ if (params->mode != IEEE80211_CONN_MODE_S1G)
+ break;
+ if (elen == sizeof(struct ieee80211_aid_response_ie))
+ elems->aid_resp = (void *)pos;
+ else
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ default:
+ break;
+ }
+
+ if (elem_parse_failed)
+ elems->parse_error |= elem_parse_failed;
+ else
+ __set_bit(id, seen_elems);
+ }
+
+ if (!for_each_element_completed(elem, params->start, params->len))
+ elems->parse_error |= IEEE80211_PARSE_ERR_INVALID_END;
+
+ return crc;
+}
+
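
The CRC returned here covers only elements whose IDs are set in params->filter, a u64 bitmap limited to IDs below 64, plus the extension elements explicitly marked with calc_crc; callers can compare successive beacon CRCs to detect configuration changes cheaply. A sketch of caller-side usage, where parse_beacon_elems_crc() is a hypothetical wrapper returning elems->crc:

        static bool beacon_changed(const u8 *start, size_t len, u32 *prev_crc)
        {
                u64 filter = BIT_ULL(WLAN_EID_SSID) |
                             BIT_ULL(WLAN_EID_ERP_INFO) |
                             BIT_ULL(WLAN_EID_HT_OPERATION);
                /* hypothetical wrapper around ieee802_11_parse_elems_full() */
                u32 crc = parse_beacon_elems_crc(start, len, filter, *prev_crc);

                if (crc == *prev_crc)
                        return false;   /* no tracked element changed */
                *prev_crc = crc;
                return true;
        }
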
+static size_t ieee802_11_find_bssid_profile(const u8 *start, size_t len,
+ struct ieee802_11_elems *elems,
+ struct cfg80211_bss *bss,
+ u8 *nontransmitted_profile)
+{
+ const struct element *elem, *sub;
+ size_t profile_len = 0;
+ bool found = false;
+
+ if (!bss || !bss->transmitted_bss)
+ return profile_len;
+
+ for_each_element_id(elem, WLAN_EID_MULTIPLE_BSSID, start, len) {
+ if (elem->datalen < 2)
+ continue;
+ if (elem->data[0] < 1 || elem->data[0] > 8)
+ continue;
+
+ for_each_element(sub, elem->data + 1, elem->datalen - 1) {
+ u8 new_bssid[ETH_ALEN];
+ const u8 *index;
+
+ if (sub->id != 0 || sub->datalen < 4) {
+ /* not a valid BSS profile */
+ continue;
+ }
+
+ if (sub->data[0] != WLAN_EID_NON_TX_BSSID_CAP ||
+ sub->data[1] != 2) {
+ /* The first element of the
+ * Nontransmitted BSSID Profile is not
+ * the Nontransmitted BSSID Capability
+ * element.
+ */
+ continue;
+ }
+
+ memset(nontransmitted_profile, 0, len);
+ profile_len = cfg80211_merge_profile(start, len,
+ elem,
+ sub,
+ nontransmitted_profile,
+ len);
+
+ /* found a Nontransmitted BSSID Profile */
+ index = cfg80211_find_ie(WLAN_EID_MULTI_BSSID_IDX,
+ nontransmitted_profile,
+ profile_len);
+ if (!index || index[1] < 1 || index[2] == 0) {
+ /* Invalid MBSSID Index element */
+ continue;
+ }
+
+ cfg80211_gen_new_bssid(bss->transmitted_bss->bssid,
+ elem->data[0],
+ index[2],
+ new_bssid);
+ if (ether_addr_equal(new_bssid, bss->bssid)) {
+ found = true;
+ elems->bssid_index_len = index[1];
+ elems->bssid_index = (void *)&index[2];
+ break;
+ }
+ }
+ }
+
+ return found ? profile_len : 0;
+}
+
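
A nontransmitted BSSID is derived rather than carried verbatim: the low n bits of the transmitted BSSID (n being the MaxBSSID indicator, elem->data[0] above) are replaced by (low bits + profile index) mod 2^n. That is what cfg80211_gen_new_bssid() computes; a re-derivation for illustration:

        /* max_bssid_ind is 1..8, as validated by the caller above */
        static void gen_new_bssid_sketch(const u8 *bssid, u8 max_bssid_ind,
                                         u8 mbssid_index, u8 *new_bssid)
        {
                u64 addr = ether_addr_to_u64(bssid);
                u64 mask = GENMASK_ULL(max_bssid_ind - 1, 0);

                /* keep high bits, add the profile index modulo 2^n */
                addr = (addr & ~mask) | (((addr & mask) + mbssid_index) & mask);
                u64_to_ether_addr(addr, new_bssid);
        }
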
+static void
+ieee80211_mle_get_sta_prof(struct ieee80211_elems_parse *elems_parse,
+ u8 link_id)
+{
+ struct ieee802_11_elems *elems = &elems_parse->elems;
+ const struct ieee80211_multi_link_elem *ml = elems->ml_basic;
+ ssize_t ml_len = elems->ml_basic_len;
+ const struct element *sub;
+
+ for_each_mle_subelement(sub, (u8 *)ml, ml_len) {
+ struct ieee80211_mle_per_sta_profile *prof = (void *)sub->data;
+ ssize_t sta_prof_len;
+ u16 control;
+
+ if (sub->id != IEEE80211_MLE_SUBELEM_PER_STA_PROFILE)
+ continue;
+
+ if (!ieee80211_mle_basic_sta_prof_size_ok(sub->data,
+ sub->datalen))
+ return;
+
+ control = le16_to_cpu(prof->control);
+
+ if (link_id != u16_get_bits(control,
+ IEEE80211_MLE_STA_CONTROL_LINK_ID))
+ continue;
+
+ if (!(control & IEEE80211_MLE_STA_CONTROL_COMPLETE_PROFILE))
+ return;
+
+ /* the sub element can be fragmented */
+ sta_prof_len =
+ cfg80211_defragment_element(sub,
+ (u8 *)ml, ml_len,
+ elems_parse->scratch_pos,
+ elems_parse->scratch +
+ elems_parse->scratch_len -
+ elems_parse->scratch_pos,
+ IEEE80211_MLE_SUBELEM_FRAGMENT);
+
+ if (sta_prof_len < 0)
+ return;
+
+ elems->prof = (void *)elems_parse->scratch_pos;
+ elems->sta_prof_len = sta_prof_len;
+ elems_parse->scratch_pos += sta_prof_len;
+
+ return;
+ }
+}
+
+static void ieee80211_mle_parse_link(struct ieee80211_elems_parse *elems_parse,
+ struct ieee80211_elems_parse_params *params)
+{
+ struct ieee802_11_elems *elems = &elems_parse->elems;
+ struct ieee80211_mle_per_sta_profile *prof;
+ struct ieee80211_elems_parse_params sub = {
+ .mode = params->mode,
+ .action = params->action,
+ .from_ap = params->from_ap,
+ .link_id = -1,
+ };
+ ssize_t ml_len = elems->ml_basic_len;
+ const struct element *non_inherit = NULL;
+ const u8 *end;
+
+ ml_len = cfg80211_defragment_element(elems_parse->ml_basic_elem,
+ elems->ie_start,
+ elems->total_len,
+ elems_parse->scratch_pos,
+ elems_parse->scratch +
+ elems_parse->scratch_len -
+ elems_parse->scratch_pos,
+ WLAN_EID_FRAGMENT);
+
+ if (ml_len < 0)
+ return;
+
+ elems->ml_basic = (const void *)elems_parse->scratch_pos;
+ elems->ml_basic_len = ml_len;
+ elems_parse->scratch_pos += ml_len;
+
+ if (params->link_id == -1)
+ return;
+
+ ieee80211_mle_get_sta_prof(elems_parse, params->link_id);
+ prof = elems->prof;
+
+ if (!prof)
+ return;
+
+ /* check if we have the 4 bytes for the fixed part in assoc response */
+ if (elems->sta_prof_len < sizeof(*prof) + prof->sta_info_len - 1 + 4) {
+ elems->prof = NULL;
+ elems->sta_prof_len = 0;
+ return;
+ }
+
+ /*
+ * Skip the capability information and the status code that are expected
+ * as part of the station profile in association response frames. Note
+ * the -1 is because the 'sta_info_len' is accounted for as part of the
+ * per-STA profile, but not part of the 'u8 variable[]' portion.
+ */
+ sub.start = prof->variable + prof->sta_info_len - 1 + 4;
+ end = (const u8 *)prof + elems->sta_prof_len;
+ sub.len = end - sub.start;
+
+ non_inherit = cfg80211_find_ext_elem(WLAN_EID_EXT_NON_INHERITANCE,
+ sub.start, sub.len);
+ _ieee802_11_parse_elems_full(&sub, elems_parse, non_inherit);
+}
+
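
Multi-Link elements routinely exceed the 255-byte element size limit, so they arrive as a leading element followed by fragment elements, and cfg80211_defragment_element() concatenates the pieces into the scratch buffer (WLAN_EID_FRAGMENT at the top level, IEEE80211_MLE_SUBELEM_FRAGMENT inside the MLE). A simplified sketch of the concatenation:

        /* simplified: the real helper also validates fragment sizing rules */
        static ssize_t defrag_sketch(const struct element *elem,
                                     const u8 *ies, size_t ieslen,
                                     u8 *buf, size_t buflen, u8 frag_id)
        {
                const u8 *pos = elem->data;
                size_t copied = 0;
                u8 elen = elem->datalen;

                for (;;) {
                        if (copied + elen > buflen || pos + elen > ies + ieslen)
                                return -1;
                        memcpy(buf + copied, pos, elen);
                        copied += elen;
                        pos += elen;            /* next element header */
                        if (pos + 2 > ies + ieslen || pos[0] != frag_id)
                                break;
                        elen = pos[1];
                        pos += 2;               /* skip fragment id/len */
                }
                return copied;
        }
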
+static void
+ieee80211_mle_defrag_reconf(struct ieee80211_elems_parse *elems_parse)
+{
+ struct ieee802_11_elems *elems = &elems_parse->elems;
+ ssize_t ml_len;
+
+ ml_len = cfg80211_defragment_element(elems_parse->ml_reconf_elem,
+ elems->ie_start,
+ elems->total_len,
+ elems_parse->scratch_pos,
+ elems_parse->scratch +
+ elems_parse->scratch_len -
+ elems_parse->scratch_pos,
+ WLAN_EID_FRAGMENT);
+ if (ml_len < 0)
+ return;
+ elems->ml_reconf = (void *)elems_parse->scratch_pos;
+ elems->ml_reconf_len = ml_len;
+ elems_parse->scratch_pos += ml_len;
+}
+
+struct ieee802_11_elems *
+ieee802_11_parse_elems_full(struct ieee80211_elems_parse_params *params)
+{
+ struct ieee80211_elems_parse *elems_parse;
+ struct ieee802_11_elems *elems;
+ const struct element *non_inherit = NULL;
+ u8 *nontransmitted_profile;
+ int nontransmitted_profile_len = 0;
+ size_t scratch_len = 3 * params->len;
+
+ BUILD_BUG_ON(offsetof(typeof(*elems_parse), elems) != 0);
+
+ elems_parse = kzalloc(struct_size(elems_parse, scratch, scratch_len),
+ GFP_ATOMIC);
+ if (!elems_parse)
+ return NULL;
+
+ elems_parse->scratch_len = scratch_len;
+ elems_parse->scratch_pos = elems_parse->scratch;
+
+ elems = &elems_parse->elems;
+ elems->ie_start = params->start;
+ elems->total_len = params->len;
+
+ nontransmitted_profile = elems_parse->scratch_pos;
+ nontransmitted_profile_len =
+ ieee802_11_find_bssid_profile(params->start, params->len,
+ elems, params->bss,
+ nontransmitted_profile);
+ elems_parse->scratch_pos += nontransmitted_profile_len;
+ non_inherit = cfg80211_find_ext_elem(WLAN_EID_EXT_NON_INHERITANCE,
+ nontransmitted_profile,
+ nontransmitted_profile_len);
+
+ elems->crc = _ieee802_11_parse_elems_full(params, elems_parse,
+ non_inherit);
+
+ /* Override with nontransmitted profile, if found */
+ if (nontransmitted_profile_len) {
+ struct ieee80211_elems_parse_params sub = {
+ .mode = params->mode,
+ .start = nontransmitted_profile,
+ .len = nontransmitted_profile_len,
+ .action = params->action,
+ .link_id = params->link_id,
+ };
+
+ _ieee802_11_parse_elems_full(&sub, elems_parse, NULL);
+ }
+
+ ieee80211_mle_parse_link(elems_parse, params);
+
+ ieee80211_mle_defrag_reconf(elems_parse);
+
+ if (elems->tim && !elems->parse_error) {
+ const struct ieee80211_tim_ie *tim_ie = elems->tim;
+
+ elems->dtim_period = tim_ie->dtim_period;
+ elems->dtim_count = tim_ie->dtim_count;
+ }
+
+ /* Override DTIM period and count if needed */
+ if (elems->bssid_index &&
+ elems->bssid_index_len >=
+ offsetofend(struct ieee80211_bssid_index, dtim_period))
+ elems->dtim_period = elems->bssid_index->dtim_period;
+
+ if (elems->bssid_index &&
+ elems->bssid_index_len >=
+ offsetofend(struct ieee80211_bssid_index, dtim_count))
+ elems->dtim_count = elems->bssid_index->dtim_count;
+
+ return elems;
+}
+EXPORT_SYMBOL_IF_KUNIT(ieee802_11_parse_elems_full);
+
+int ieee80211_parse_bitrates(enum nl80211_chan_width width,
+ const struct ieee80211_supported_band *sband,
+ const u8 *srates, int srates_len, u32 *rates)
+{
+ u32 rate_flags = ieee80211_chanwidth_rate_flags(width);
+ struct ieee80211_rate *br;
+ int brate, rate, i, j, count = 0;
+
+ *rates = 0;
+
+ for (i = 0; i < srates_len; i++) {
+ rate = srates[i] & 0x7f;
+
+ for (j = 0; j < sband->n_bitrates; j++) {
+ br = &sband->bitrates[j];
+ if ((rate_flags & br->flags) != rate_flags)
+ continue;
+
+ brate = DIV_ROUND_UP(br->bitrate, 5);
+ if (brate == rate) {
+ *rates |= BIT(j);
+ count++;
+ break;
+ }
+ }
+ }
+ return count;
+}
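
The unit conversions are the subtle part here: supported-rates bytes encode the rate in 500 kbps units with the top bit flagging a basic rate (hence the & 0x7f), while sband bitrates are stored in 100 kbps units (hence DIV_ROUND_UP(br->bitrate, 5)). A worked example for 11 Mbps:

        u8 srate = 0x96;                        /* basic-rate bit | 22 */
        int rate = srate & 0x7f;                /* 22 * 500 kbps = 11 Mbps */
        int bitrate = 110;                      /* sband units of 100 kbps */
        int brate = DIV_ROUND_UP(bitrate, 5);   /* 22 -- matches 'rate' */
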
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index 9d33fd2377c8..23404b275457 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -4,7 +4,7 @@
* Copyright 2005-2006, Devicescape Software, Inc.
* Copyright (c) 2006 Jiri Benc <jbenc@suse.cz>
* Copyright 2017 Intel Deutschland GmbH
- * Copyright (C) 2022 Intel Corporation
+ * Copyright (C) 2019, 2022-2024 Intel Corporation
*/
#include <linux/kernel.h>
@@ -37,7 +37,7 @@ void rate_control_rate_init(struct sta_info *sta)
struct ieee80211_supported_band *sband;
struct ieee80211_chanctx_conf *chanctx_conf;
- ieee80211_sta_set_rx_nss(&sta->deflink);
+ ieee80211_sta_init_nss(&sta->deflink);
if (!ref)
return;
@@ -279,10 +279,10 @@ void ieee80211_check_rate_mask(struct ieee80211_link_data *link)
u32 user_mask, basic_rates = link->conf->basic_rates;
enum nl80211_band band;
- if (WARN_ON(!link->conf->chandef.chan))
+ if (WARN_ON(!link->conf->chanreq.oper.chan))
return;
- band = link->conf->chandef.chan->band;
+ band = link->conf->chanreq.oper.chan->band;
if (band == NL80211_BAND_S1GHZ) {
/* TODO */
return;
@@ -762,7 +762,7 @@ static bool rate_control_cap_mask(struct ieee80211_sub_if_data *sdata,
u32 i, flags;
*mask = sdata->rc_rateidx_mask[sband->band];
- flags = ieee80211_chandef_rate_flags(&sdata->vif.bss_conf.chandef);
+ flags = ieee80211_chandef_rate_flags(&sdata->vif.bss_conf.chanreq.oper);
for (i = 0; i < sband->n_bitrates; i++) {
if ((flags & sband->bitrates[i].flags) != flags)
*mask &= ~BIT(i);
@@ -818,7 +818,7 @@ rate_control_apply_mask_ratetbl(struct sta_info *sta,
mcs_mask, vht_mask))
return;
- chan_width = sta->sdata->vif.bss_conf.chandef.width;
+ chan_width = sta->sdata->vif.bss_conf.chanreq.oper.width;
for (i = 0; i < IEEE80211_TX_RATE_TABLE_SIZE; i++) {
if (rates->rate[i].idx < 0)
break;
@@ -855,7 +855,7 @@ static void rate_control_apply_mask(struct ieee80211_sub_if_data *sdata,
* included in the configured mask and change the rate indexes
* if needed.
*/
- chan_width = sdata->vif.bss_conf.chandef.width;
+ chan_width = sdata->vif.bss_conf.chanreq.oper.width;
for (i = 0; i < max_rates; i++) {
/* Skip invalid rates */
if (rates[i].idx < 0)
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 0bf72928ccfc..c1f850138405 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -6,7 +6,7 @@
* Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
#include <linux/jiffies.h>
@@ -1251,8 +1251,7 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_sub_if_data *sdata
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
- u16 sc = le16_to_cpu(hdr->seq_ctrl);
- u16 mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4;
+ u16 mpdu_seq_num = ieee80211_get_sn(hdr);
u16 head_seq_num, buf_size;
int index;
bool ret = true;
@@ -1435,13 +1434,31 @@ ieee80211_rx_h_check_dup(struct ieee80211_rx_data *rx)
return RX_CONTINUE;
if (ieee80211_is_ctl(hdr->frame_control) ||
- ieee80211_is_any_nullfunc(hdr->frame_control) ||
- is_multicast_ether_addr(hdr->addr1))
+ ieee80211_is_any_nullfunc(hdr->frame_control))
return RX_CONTINUE;
if (!rx->sta)
return RX_CONTINUE;
+ if (unlikely(is_multicast_ether_addr(hdr->addr1))) {
+ struct ieee80211_sub_if_data *sdata = rx->sdata;
+ u16 sn = ieee80211_get_sn(hdr);
+
+ if (!ieee80211_is_data_present(hdr->frame_control))
+ return RX_CONTINUE;
+
+ if (!ieee80211_vif_is_mld(&sdata->vif) ||
+ sdata->vif.type != NL80211_IFTYPE_STATION)
+ return RX_CONTINUE;
+
+ if (sdata->u.mgd.mcast_seq_last != IEEE80211_SN_MODULO &&
+ ieee80211_sn_less_eq(sn, sdata->u.mgd.mcast_seq_last))
+ return RX_DROP_U_DUP;
+
+ sdata->u.mgd.mcast_seq_last = sn;
+ return RX_CONTINUE;
+ }
+
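
The duplicate detection above relies on 12-bit sequence-number arithmetic: valid SNs span 0..4095, so IEEE80211_SN_MODULO (4096) can never match a received SN and doubles as the "nothing seen yet" sentinel that ieee80211_sta_setup_sdata() stores in mcast_seq_last. The less-or-equal comparison is done modulo 4096 so ordering survives wraparound; a sketch of the math behind ieee80211_sn_less_eq():

        #define SN_MODULO       0x1000          /* 12-bit SN space */
        #define SN_MASK         (SN_MODULO - 1)

        /* a <= b in circular order, within half the SN space */
        static bool sn_less_eq_sketch(u16 a, u16 b)
        {
                return ((b - a) & SN_MASK) <= (SN_MODULO >> 1);
        }
        /* e.g. sn_less_eq_sketch(4090, 3) is true: 3 follows 4090 after wrap */
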
if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
rx->sta->last_seq_ctrl[rx->seqno_idx] == hdr->seq_ctrl)) {
I802_DEBUG_INC(rx->local->dot11FrameDuplicateCount);
@@ -3369,8 +3386,7 @@ ieee80211_rx_check_bss_color_collision(struct ieee80211_rx_data *rx)
IEEE80211_HE_OPERATION_BSS_COLOR_MASK);
if (color == bss_conf->he_bss_color.color)
ieee80211_obss_color_collision_notify(&rx->sdata->vif,
- BIT_ULL(color),
- GFP_ATOMIC);
+ BIT_ULL(color));
}
}
@@ -3763,6 +3779,28 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
break;
}
break;
+ case WLAN_CATEGORY_PROTECTED_EHT:
+ switch (mgmt->u.action.u.ttlm_req.action_code) {
+ case WLAN_PROTECTED_EHT_ACTION_TTLM_REQ:
+ if (sdata->vif.type != NL80211_IFTYPE_STATION)
+ break;
+
+ if (len < offsetofend(typeof(*mgmt),
+ u.action.u.ttlm_req))
+ goto invalid;
+ goto queue;
+ case WLAN_PROTECTED_EHT_ACTION_TTLM_RES:
+ if (sdata->vif.type != NL80211_IFTYPE_STATION)
+ break;
+
+ if (len < offsetofend(typeof(*mgmt),
+ u.action.u.ttlm_res))
+ goto invalid;
+ goto queue;
+ default:
+ break;
+ }
+ break;
}
return RX_CONTINUE;
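
The length checks above use offsetofend() so a frame only has to contain bytes up to and including the last field actually read, not the whole union. The kernel macro expands to roughly the following:

        /* roughly offsetofend() from include/linux/stddef.h */
        #define offsetofend_sketch(TYPE, MEMBER) \
                (offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

        /* e.g.: len < offsetofend_sketch(struct ieee80211_mgmt,
         *                                u.action.u.ttlm_req)  -> invalid */
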
@@ -5192,7 +5230,6 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
*/
if (!status->link_valid && pubsta->mlo) {
- struct ieee80211_hdr *hdr = (void *)skb->data;
struct link_sta_info *link_sta;
link_sta = link_sta_info_get_bss(rx.sdata,
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index f9d5842601fa..0429e59ba387 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -257,7 +257,6 @@ static bool ieee80211_scan_accept_presp(struct ieee80211_sub_if_data *sdata,
void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb)
{
struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
- struct ieee80211_sub_if_data *sdata1, *sdata2;
struct ieee80211_mgmt *mgmt = (void *)skb->data;
struct ieee80211_bss *bss;
struct ieee80211_channel *channel;
@@ -281,12 +280,6 @@ void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb)
if (skb->len < min_hdr_len)
return;
- sdata1 = rcu_dereference(local->scan_sdata);
- sdata2 = rcu_dereference(local->sched_scan_sdata);
-
- if (likely(!sdata1 && !sdata2))
- return;
-
if (test_and_clear_bit(SCAN_BEACON_WAIT, &local->scanning)) {
/*
* we were passive scanning because of radar/no-IR, but
@@ -304,10 +297,17 @@ void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb)
return;
if (ieee80211_is_probe_resp(mgmt->frame_control)) {
+ struct ieee80211_sub_if_data *sdata1, *sdata2;
struct cfg80211_scan_request *scan_req;
struct cfg80211_sched_scan_request *sched_scan_req;
u32 scan_req_flags = 0, sched_scan_req_flags = 0;
+ sdata1 = rcu_dereference(local->scan_sdata);
+ sdata2 = rcu_dereference(local->sched_scan_sdata);
+
+ if (likely(!sdata1 && !sdata2))
+ return;
+
scan_req = rcu_dereference(local->scan_req);
sched_scan_req = rcu_dereference(local->sched_scan_req);
@@ -327,8 +327,16 @@ void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb)
sched_scan_req_flags,
mgmt->da))
return;
+ } else {
+ /* Beacons are expected only with broadcast address */
+ if (!is_broadcast_ether_addr(mgmt->da))
+ return;
}
+ /* Do not update the BSS table when only monitor interfaces are open */
+ if (local->open_count == local->monitors)
+ return;
+
bss = ieee80211_bss_info_update(local, rx_status,
mgmt, skb->len,
channel);
@@ -400,6 +408,8 @@ static bool ieee80211_prep_hw_scan(struct ieee80211_sub_if_data *sdata)
req->ie, req->ie_len,
bands_used, req->rates, &chandef,
flags);
+ if (ielen < 0)
+ return false;
local->hw_scan_req->req.ie_len = ielen;
local->hw_scan_req->req.no_cck = req->no_cck;
ether_addr_copy(local->hw_scan_req->req.mac_addr, req->mac_addr);
@@ -476,7 +486,7 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
}
/* Set power back to normal operating levels. */
- ieee80211_hw_config(local, 0);
+ ieee80211_hw_conf_chan(local);
if (!hw_scan && was_scanning) {
ieee80211_configure_filter(local);
@@ -523,7 +533,7 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata)
{
/* Software scan is not supported in multi-channel cases */
- if (local->use_chanctx)
+ if (!local->emulate_chanctx)
return -EOPNOTSUPP;
/*
@@ -553,7 +563,7 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local,
ieee80211_configure_filter(local);
/* We need to set power level at maximum rate for scanning. */
- ieee80211_hw_config(local, 0);
+ ieee80211_hw_conf_chan(local);
wiphy_delayed_work_queue(local->hw.wiphy, &local->scan_work, 0);
@@ -677,7 +687,10 @@ static void ieee80211_scan_state_send_probe(struct ieee80211_local *local,
* After sending probe requests, wait for probe responses
* on the channel.
*/
- *next_delay = IEEE80211_CHANNEL_TIME;
+ *next_delay = msecs_to_jiffies(scan_req->duration) >
+ IEEE80211_PROBE_DELAY + IEEE80211_CHANNEL_TIME ?
+ msecs_to_jiffies(scan_req->duration) - IEEE80211_PROBE_DELAY :
+ IEEE80211_CHANNEL_TIME;
local->next_scan_state = SCAN_DECISION;
}
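
When userspace requests a specific dwell time, the probe dwell becomes the requested duration minus the probe delay already spent, but never less than the default; the dense ternary above is equivalent to the following (the passive-channel case later in this file applies the same idea without the probe-delay subtraction):

        unsigned long dur = msecs_to_jiffies(scan_req->duration);

        if (dur > IEEE80211_PROBE_DELAY + IEEE80211_CHANNEL_TIME)
                *next_delay = dur - IEEE80211_PROBE_DELAY; /* honor request */
        else
                *next_delay = IEEE80211_CHANNEL_TIME;      /* default dwell */
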
@@ -787,7 +800,7 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
if (hw_scan) {
__set_bit(SCAN_HW_SCANNING, &local->scanning);
} else if ((req->n_channels == 1) &&
- (req->channels[0] == local->_oper_chandef.chan)) {
+ (req->channels[0] == local->hw.conf.chandef.chan)) {
/*
* If we are scanning only on the operating channel
* then we do not need to stop normal activities
@@ -805,7 +818,7 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
ieee80211_configure_filter(local); /* accept probe-responses */
/* We need to ensure power level is at max for scanning. */
- ieee80211_hw_config(local, 0);
+ ieee80211_hw_conf_chan(local);
if ((req->channels[0]->flags & (IEEE80211_CHAN_NO_IR |
IEEE80211_CHAN_RADAR)) ||
@@ -970,13 +983,13 @@ static void ieee80211_scan_state_set_channel(struct ieee80211_local *local,
/* If scanning on oper channel, use whatever channel-type
* is currently in use.
*/
- if (chan == local->_oper_chandef.chan)
- local->scan_chandef = local->_oper_chandef;
+ if (chan == local->hw.conf.chandef.chan)
+ local->scan_chandef = local->hw.conf.chandef;
else
local->scan_chandef.width = NL80211_CHAN_WIDTH_20_NOHT;
set_channel:
- if (ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL))
+ if (ieee80211_hw_conf_chan(local))
skip = 1;
/* advance state machine to next channel/band */
@@ -1000,7 +1013,10 @@ set_channel:
*/
if ((chan->flags & (IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_RADAR)) ||
!scan_req->n_ssids) {
- *next_delay = IEEE80211_PASSIVE_CHANNEL_TIME;
+ *next_delay = msecs_to_jiffies(scan_req->duration) >
+ IEEE80211_PASSIVE_CHANNEL_TIME ?
+ msecs_to_jiffies(scan_req->duration) :
+ IEEE80211_PASSIVE_CHANNEL_TIME;
local->next_scan_state = SCAN_DECISION;
if (scan_req->n_ssids)
set_bit(SCAN_BEACON_WAIT, &local->scanning);
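/*
 * The passive branch above is a plain max() between the requested
 * duration and the default passive dwell; sketch with placeholder
 * arguments.
 */
static unsigned long passive_dwell(unsigned long duration_jiffies,
				   unsigned long passive_channel_time)
{
	return duration_jiffies > passive_channel_time ?
	       duration_jiffies : passive_channel_time;
}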
@@ -1017,7 +1033,7 @@ static void ieee80211_scan_state_suspend(struct ieee80211_local *local,
{
/* switch back to the operating channel */
local->scan_chandef.chan = NULL;
- ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
+ ieee80211_hw_conf_chan(local);
/* disable PS */
ieee80211_offchannel_return(local);
@@ -1316,10 +1332,12 @@ int __ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata,
ieee80211_prepare_scan_chandef(&chandef);
- ieee80211_build_preq_ies(sdata, ie, num_bands * iebufsz,
- &sched_scan_ies, req->ie,
- req->ie_len, bands_used, rate_masks, &chandef,
- flags);
+ ret = ieee80211_build_preq_ies(sdata, ie, num_bands * iebufsz,
+ &sched_scan_ies, req->ie,
+ req->ie_len, bands_used, rate_masks,
+ &chandef, flags);
+ if (ret < 0)
+ goto error;
ret = drv_sched_scan_start(local, sdata, req, &sched_scan_ies);
if (ret == 0) {
@@ -1327,8 +1345,8 @@ int __ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata,
rcu_assign_pointer(local->sched_scan_req, req);
}
+error:
kfree(ie);
-
out:
if (ret) {
/* Clean in case of failure after HW restart or upon resume. */
diff --git a/net/mac80211/spectmgmt.c b/net/mac80211/spectmgmt.c
index 55959b0b24c5..327c74e296e2 100644
--- a/net/mac80211/spectmgmt.c
+++ b/net/mac80211/spectmgmt.c
@@ -19,21 +19,222 @@
#include "sta_info.h"
#include "wme.h"
+static bool
+wbcs_elem_to_chandef(const struct ieee80211_wide_bw_chansw_ie *wbcs_elem,
+ struct cfg80211_chan_def *chandef)
+{
+ u8 ccfs0 = wbcs_elem->new_center_freq_seg0;
+ u8 ccfs1 = wbcs_elem->new_center_freq_seg1;
+ u32 cf0 = ieee80211_channel_to_frequency(ccfs0, chandef->chan->band);
+ u32 cf1 = ieee80211_channel_to_frequency(ccfs1, chandef->chan->band);
+
+ switch (wbcs_elem->new_channel_width) {
+ case IEEE80211_VHT_CHANWIDTH_160MHZ:
+ /* deprecated encoding */
+ chandef->width = NL80211_CHAN_WIDTH_160;
+ chandef->center_freq1 = cf0;
+ break;
+ case IEEE80211_VHT_CHANWIDTH_80P80MHZ:
+ /* deprecated encoding */
+ chandef->width = NL80211_CHAN_WIDTH_80P80;
+ chandef->center_freq1 = cf0;
+ chandef->center_freq2 = cf1;
+ break;
+ case IEEE80211_VHT_CHANWIDTH_80MHZ:
+ chandef->width = NL80211_CHAN_WIDTH_80;
+ chandef->center_freq1 = cf0;
+
+ if (ccfs1) {
+ u8 diff = abs(ccfs0 - ccfs1);
+
+ if (diff == 8) {
+ chandef->width = NL80211_CHAN_WIDTH_160;
+ chandef->center_freq1 = cf1;
+ } else if (diff > 8) {
+ chandef->width = NL80211_CHAN_WIDTH_80P80;
+ chandef->center_freq2 = cf1;
+ }
+ }
+ break;
+ case IEEE80211_VHT_CHANWIDTH_USE_HT:
+ default:
+ /* If the WBCS element is present, the new channel bandwidth is
+ * at least 40 MHz.
+ */
+ chandef->width = NL80211_CHAN_WIDTH_40;
+ chandef->center_freq1 = cf0;
+ break;
+ }
+
+ return cfg80211_chandef_valid(chandef);
+}
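/*
 * Standalone sketch of the CCFS0/CCFS1 decoding rule in
 * wbcs_elem_to_chandef() above, working directly on channel numbers
 * (one channel = 5 MHz, so 8 channels = 40 MHz, i.e. half of 160 MHz).
 */
#include <stdio.h>
#include <stdlib.h>

enum width { W80, W160, W80P80 };

static enum width wbcs_80mhz_width(unsigned int ccfs0, unsigned int ccfs1)
{
	unsigned int diff;

	if (!ccfs1)
		return W80;		/* plain 80 MHz */
	diff = abs((int)ccfs0 - (int)ccfs1);
	if (diff == 8)
		return W160;		/* ccfs1 is the 160 MHz center */
	if (diff > 8)
		return W80P80;		/* ccfs1 is the second segment */
	return W80;
}

int main(void)
{
	/* e.g. 5 GHz channel 42 (an 80 MHz center) inside the 160 MHz
	 * block centered on channel 50: diff == 8 -> 160 MHz */
	printf("%d\n", wbcs_80mhz_width(42, 50) == W160);
	return 0;
}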
+
+static void
+validate_chandef_by_ht_vht_oper(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_conn_settings *conn,
+ u32 vht_cap_info,
+ struct cfg80211_chan_def *chandef)
+{
+ u32 control_freq, center_freq1, center_freq2;
+ enum nl80211_chan_width chan_width;
+ struct ieee80211_ht_operation ht_oper;
+ struct ieee80211_vht_operation vht_oper;
+
+ if (conn->mode < IEEE80211_CONN_MODE_HT ||
+ conn->bw_limit < IEEE80211_CONN_BW_LIMIT_40) {
+ chandef->chan = NULL;
+ return;
+ }
+
+ control_freq = chandef->chan->center_freq;
+ center_freq1 = chandef->center_freq1;
+ center_freq2 = chandef->center_freq2;
+ chan_width = chandef->width;
+
+ ht_oper.primary_chan = ieee80211_frequency_to_channel(control_freq);
+ if (control_freq != center_freq1)
+ ht_oper.ht_param = control_freq > center_freq1 ?
+ IEEE80211_HT_PARAM_CHA_SEC_BELOW :
+ IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
+ else
+ ht_oper.ht_param = IEEE80211_HT_PARAM_CHA_SEC_NONE;
+
+ ieee80211_chandef_ht_oper(&ht_oper, chandef);
+
+ if (conn->mode < IEEE80211_CONN_MODE_VHT)
+ return;
+
+ vht_oper.center_freq_seg0_idx =
+ ieee80211_frequency_to_channel(center_freq1);
+ vht_oper.center_freq_seg1_idx = center_freq2 ?
+ ieee80211_frequency_to_channel(center_freq2) : 0;
+
+ switch (chan_width) {
+ case NL80211_CHAN_WIDTH_320:
+ WARN_ON(1);
+ break;
+ case NL80211_CHAN_WIDTH_160:
+ vht_oper.chan_width = IEEE80211_VHT_CHANWIDTH_80MHZ;
+ vht_oper.center_freq_seg1_idx = vht_oper.center_freq_seg0_idx;
+ vht_oper.center_freq_seg0_idx +=
+ control_freq < center_freq1 ? -8 : 8;
+ break;
+ case NL80211_CHAN_WIDTH_80P80:
+ vht_oper.chan_width = IEEE80211_VHT_CHANWIDTH_80MHZ;
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ vht_oper.chan_width = IEEE80211_VHT_CHANWIDTH_80MHZ;
+ break;
+ default:
+ vht_oper.chan_width = IEEE80211_VHT_CHANWIDTH_USE_HT;
+ break;
+ }
+
+ ht_oper.operation_mode =
+ le16_encode_bits(vht_oper.center_freq_seg1_idx,
+ IEEE80211_HT_OP_MODE_CCFS2_MASK);
+
+ if (!ieee80211_chandef_vht_oper(&sdata->local->hw, vht_cap_info,
+ &vht_oper, &ht_oper, chandef))
+ chandef->chan = NULL;
+}
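/*
 * Sketch of the 160 MHz re-encoding used above: the VHT operation is
 * rebuilt in the "80 MHz + CCFS1" form, where seg1 carries the 160 MHz
 * center and seg0 is shifted +/-8 channels (+/-40 MHz) toward the
 * control channel to name the primary 80 MHz segment.
 */
static void encode_160_as_80_plus_ccfs1(unsigned int control_chan,
					unsigned int center160,
					unsigned int *seg0, unsigned int *seg1)
{
	*seg1 = center160;
	*seg0 = control_chan < center160 ? center160 - 8 : center160 + 8;
}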
+
+static void
+validate_chandef_by_6ghz_he_eht_oper(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_conn_settings *conn,
+ struct cfg80211_chan_def *chandef)
+{
+ struct ieee80211_local *local = sdata->local;
+ u32 control_freq, center_freq1, center_freq2;
+ enum nl80211_chan_width chan_width;
+ struct {
+ struct ieee80211_he_operation _oper;
+ struct ieee80211_he_6ghz_oper _6ghz_oper;
+ } __packed he;
+ struct {
+ struct ieee80211_eht_operation _oper;
+ struct ieee80211_eht_operation_info _oper_info;
+ } __packed eht;
+
+ if (conn->mode < IEEE80211_CONN_MODE_HE) {
+ chandef->chan = NULL;
+ return;
+ }
+
+ control_freq = chandef->chan->center_freq;
+ center_freq1 = chandef->center_freq1;
+ center_freq2 = chandef->center_freq2;
+ chan_width = chandef->width;
+
+ he._oper.he_oper_params =
+ le32_encode_bits(1, IEEE80211_HE_OPERATION_6GHZ_OP_INFO);
+ he._6ghz_oper.primary =
+ ieee80211_frequency_to_channel(control_freq);
+ he._6ghz_oper.ccfs0 = ieee80211_frequency_to_channel(center_freq1);
+ he._6ghz_oper.ccfs1 = center_freq2 ?
+ ieee80211_frequency_to_channel(center_freq2) : 0;
+
+ switch (chan_width) {
+ case NL80211_CHAN_WIDTH_320:
+ he._6ghz_oper.ccfs1 = he._6ghz_oper.ccfs0;
+ he._6ghz_oper.ccfs0 += control_freq < center_freq1 ? -16 : 16;
+ he._6ghz_oper.control = IEEE80211_EHT_OPER_CHAN_WIDTH_320MHZ;
+ break;
+ case NL80211_CHAN_WIDTH_160:
+ he._6ghz_oper.ccfs1 = he._6ghz_oper.ccfs0;
+ he._6ghz_oper.ccfs0 += control_freq < center_freq1 ? -8 : 8;
+ fallthrough;
+ case NL80211_CHAN_WIDTH_80P80:
+ he._6ghz_oper.control =
+ IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_160MHZ;
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ he._6ghz_oper.control =
+ IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_80MHZ;
+ break;
+ case NL80211_CHAN_WIDTH_40:
+ he._6ghz_oper.control =
+ IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_40MHZ;
+ break;
+ default:
+ he._6ghz_oper.control =
+ IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_20MHZ;
+ break;
+ }
+
+ if (conn->mode < IEEE80211_CONN_MODE_EHT) {
+ if (!ieee80211_chandef_he_6ghz_oper(local, &he._oper,
+ NULL, chandef))
+ chandef->chan = NULL;
+ } else {
+ eht._oper.params = IEEE80211_EHT_OPER_INFO_PRESENT;
+ eht._oper_info.control = he._6ghz_oper.control;
+ eht._oper_info.ccfs0 = he._6ghz_oper.ccfs0;
+ eht._oper_info.ccfs1 = he._6ghz_oper.ccfs1;
+
+ if (!ieee80211_chandef_he_6ghz_oper(local, &he._oper,
+ &eht._oper, chandef))
+ chandef->chan = NULL;
+ }
+}
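/*
 * Same idea for the 6 GHz/EHT path above, except 320 MHz shifts CCFS0
 * by +/-16 channels (+/-80 MHz). The 6 GHz channel <-> frequency
 * mapping sketched here (chan = (freq - 5950) / 5) matches what
 * ieee80211_frequency_to_channel() computes for that band.
 */
static unsigned int freq6ghz_to_chan(unsigned int mhz)
{
	return (mhz - 5950) / 5;
}

static void encode_320(unsigned int control_freq, unsigned int center_freq1,
		       unsigned int *ccfs0, unsigned int *ccfs1)
{
	unsigned int c = freq6ghz_to_chan(center_freq1);

	*ccfs1 = c;					/* 320 MHz center */
	*ccfs0 = control_freq < center_freq1 ? c - 16 : c + 16;
}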
+
int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
struct ieee802_11_elems *elems,
enum nl80211_band current_band,
u32 vht_cap_info,
- ieee80211_conn_flags_t conn_flags, u8 *bssid,
+ struct ieee80211_conn_settings *conn,
+ u8 *bssid,
struct ieee80211_csa_ie *csa_ie)
{
enum nl80211_band new_band = current_band;
int new_freq;
- u8 new_chan_no;
+ u8 new_chan_no = 0, new_op_class = 0;
struct ieee80211_channel *new_chan;
- struct cfg80211_chan_def new_vht_chandef = {};
+ struct cfg80211_chan_def new_chandef = {};
const struct ieee80211_sec_chan_offs_ie *sec_chan_offs;
const struct ieee80211_wide_bw_chansw_ie *wide_bw_chansw_ie;
const struct ieee80211_bandwidth_indication *bwi;
+ const struct ieee80211_ext_chansw_ie *ext_chansw_elem;
int secondary_channel_offset = -1;
memset(csa_ie, 0, sizeof(*csa_ie));
@@ -41,36 +242,41 @@ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
sec_chan_offs = elems->sec_chan_offs;
wide_bw_chansw_ie = elems->wide_bw_chansw_ie;
bwi = elems->bandwidth_indication;
+ ext_chansw_elem = elems->ext_chansw_ie;
- if (conn_flags & (IEEE80211_CONN_DISABLE_HT |
- IEEE80211_CONN_DISABLE_40MHZ)) {
+ if (conn->mode < IEEE80211_CONN_MODE_HT ||
+ conn->bw_limit < IEEE80211_CONN_BW_LIMIT_40) {
sec_chan_offs = NULL;
wide_bw_chansw_ie = NULL;
}
- if (conn_flags & IEEE80211_CONN_DISABLE_VHT)
+ if (conn->mode < IEEE80211_CONN_MODE_VHT)
wide_bw_chansw_ie = NULL;
- if (elems->ext_chansw_ie) {
- if (!ieee80211_operating_class_to_band(
- elems->ext_chansw_ie->new_operating_class,
- &new_band)) {
- sdata_info(sdata,
- "cannot understand ECSA IE operating class, %d, ignoring\n",
- elems->ext_chansw_ie->new_operating_class);
+ if (ext_chansw_elem) {
+ new_op_class = ext_chansw_elem->new_operating_class;
+
+ if (!ieee80211_operating_class_to_band(new_op_class, &new_band)) {
+ new_op_class = 0;
+ sdata_info(sdata, "cannot understand ECSA IE operating class, %d, ignoring\n",
+ ext_chansw_elem->new_operating_class);
+ } else {
+ new_chan_no = ext_chansw_elem->new_ch_num;
+ csa_ie->count = ext_chansw_elem->count;
+ csa_ie->mode = ext_chansw_elem->mode;
}
- new_chan_no = elems->ext_chansw_ie->new_ch_num;
- csa_ie->count = elems->ext_chansw_ie->count;
- csa_ie->mode = elems->ext_chansw_ie->mode;
- } else if (elems->ch_switch_ie) {
+ }
+
+ if (!new_op_class && elems->ch_switch_ie) {
new_chan_no = elems->ch_switch_ie->new_ch_num;
csa_ie->count = elems->ch_switch_ie->count;
csa_ie->mode = elems->ch_switch_ie->mode;
- } else {
- /* nothing here we understand */
- return 1;
}
+ /* nothing here we understand */
+ if (!new_chan_no)
+ return 1;
+
/* Mesh Channel Switch Parameters Element */
if (elems->mesh_chansw_params_ie) {
csa_ie->ttl = elems->mesh_chansw_params_ie->mesh_ttl;
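/*
 * The element-selection logic above, reduced to its essentials: prefer
 * the extended CSA element when its operating class is understood,
 * fall back to the plain CSA element, and bail out when neither yields
 * a channel number. Types here are simplified stand-ins.
 */
struct csa_src { unsigned int new_ch_num; int present; };

static int pick_new_channel(const struct csa_src *ecsa, int ecsa_class_ok,
			    const struct csa_src *csa, unsigned int *chan)
{
	*chan = 0;
	if (ecsa->present && ecsa_class_ok)
		*chan = ecsa->new_ch_num;
	if (!*chan && csa->present)
		*chan = csa->new_ch_num;
	return *chan ? 0 : 1;	/* 1: nothing here we understand */
}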
@@ -95,7 +301,7 @@ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
if (sec_chan_offs) {
secondary_channel_offset = sec_chan_offs->sec_chan_offs;
- } else if (!(conn_flags & IEEE80211_CONN_DISABLE_HT)) {
+ } else if (conn->mode >= IEEE80211_CONN_MODE_HT) {
/* If the secondary channel offset IE is not present,
* we can't know the post-CSA offset, so the
* best we can do is use 20 MHz.
@@ -107,26 +313,26 @@ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
default:
/* secondary_channel_offset was present but is invalid */
case IEEE80211_HT_PARAM_CHA_SEC_NONE:
- cfg80211_chandef_create(&csa_ie->chandef, new_chan,
+ cfg80211_chandef_create(&csa_ie->chanreq.oper, new_chan,
NL80211_CHAN_HT20);
break;
case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
- cfg80211_chandef_create(&csa_ie->chandef, new_chan,
+ cfg80211_chandef_create(&csa_ie->chanreq.oper, new_chan,
NL80211_CHAN_HT40PLUS);
break;
case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
- cfg80211_chandef_create(&csa_ie->chandef, new_chan,
+ cfg80211_chandef_create(&csa_ie->chanreq.oper, new_chan,
NL80211_CHAN_HT40MINUS);
break;
case -1:
- cfg80211_chandef_create(&csa_ie->chandef, new_chan,
+ cfg80211_chandef_create(&csa_ie->chanreq.oper, new_chan,
NL80211_CHAN_NO_HT);
/* keep width for 5/10 MHz channels */
- switch (sdata->vif.bss_conf.chandef.width) {
+ switch (sdata->vif.bss_conf.chanreq.oper.width) {
case NL80211_CHAN_WIDTH_5:
case NL80211_CHAN_WIDTH_10:
- csa_ie->chandef.width =
- sdata->vif.bss_conf.chandef.width;
+ csa_ie->chanreq.oper.width =
+ sdata->vif.bss_conf.chanreq.oper.width;
break;
default:
break;
@@ -134,59 +340,48 @@ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
break;
}
+ /* parse one of the Elements to build a new chandef */
+ memset(&new_chandef, 0, sizeof(new_chandef));
+ new_chandef.chan = new_chan;
if (bwi) {
/* start with the CSA one */
- new_vht_chandef = csa_ie->chandef;
+ new_chandef = csa_ie->chanreq.oper;
/* and update the width accordingly */
- /* FIXME: support 160/320 */
- ieee80211_chandef_eht_oper(&bwi->info, true, true,
- &new_vht_chandef);
- } else if (wide_bw_chansw_ie) {
- u8 new_seg1 = wide_bw_chansw_ie->new_center_freq_seg1;
- struct ieee80211_vht_operation vht_oper = {
- .chan_width =
- wide_bw_chansw_ie->new_channel_width,
- .center_freq_seg0_idx =
- wide_bw_chansw_ie->new_center_freq_seg0,
- .center_freq_seg1_idx = new_seg1,
- /* .basic_mcs_set doesn't matter */
- };
- struct ieee80211_ht_operation ht_oper = {
- .operation_mode =
- cpu_to_le16(new_seg1 <<
- IEEE80211_HT_OP_MODE_CCFS2_SHIFT),
- };
-
- /* default, for the case of IEEE80211_VHT_CHANWIDTH_USE_HT,
- * to the previously parsed chandef
- */
- new_vht_chandef = csa_ie->chandef;
-
- /* ignore if parsing fails */
- if (!ieee80211_chandef_vht_oper(&sdata->local->hw,
- vht_cap_info,
- &vht_oper, &ht_oper,
- &new_vht_chandef))
- new_vht_chandef.chan = NULL;
-
- if (conn_flags & IEEE80211_CONN_DISABLE_80P80MHZ &&
- new_vht_chandef.width == NL80211_CHAN_WIDTH_80P80)
- ieee80211_chandef_downgrade(&new_vht_chandef);
- if (conn_flags & IEEE80211_CONN_DISABLE_160MHZ &&
- new_vht_chandef.width == NL80211_CHAN_WIDTH_160)
- ieee80211_chandef_downgrade(&new_vht_chandef);
+ ieee80211_chandef_eht_oper(&bwi->info, &new_chandef);
+ } else if (!wide_bw_chansw_ie || !wbcs_elem_to_chandef(wide_bw_chansw_ie,
+ &new_chandef)) {
+ if (!ieee80211_operating_class_to_chandef(new_op_class, new_chan,
+ &new_chandef))
+ new_chandef = csa_ie->chanreq.oper;
}
- /* if VHT data is there validate & use it */
- if (new_vht_chandef.chan) {
- if (!cfg80211_chandef_compatible(&new_vht_chandef,
- &csa_ie->chandef)) {
+ /* check if the new chandef fits the capabilities */
+ if (new_band == NL80211_BAND_6GHZ)
+ validate_chandef_by_6ghz_he_eht_oper(sdata, conn, &new_chandef);
+ else
+ validate_chandef_by_ht_vht_oper(sdata, conn, vht_cap_info,
+ &new_chandef);
+
+ /* if data is there validate the bandwidth & use it */
+ if (new_chandef.chan) {
+ if (conn->bw_limit < IEEE80211_CONN_BW_LIMIT_320 &&
+ new_chandef.width == NL80211_CHAN_WIDTH_320)
+ ieee80211_chandef_downgrade(&new_chandef, NULL);
+
+ if (conn->bw_limit < IEEE80211_CONN_BW_LIMIT_160 &&
+ (new_chandef.width == NL80211_CHAN_WIDTH_80P80 ||
+ new_chandef.width == NL80211_CHAN_WIDTH_160))
+ ieee80211_chandef_downgrade(&new_chandef, NULL);
+
+ if (!cfg80211_chandef_compatible(&new_chandef,
+ &csa_ie->chanreq.oper)) {
sdata_info(sdata,
"BSS %pM: CSA has inconsistent channel data, disconnecting\n",
bssid);
return -EINVAL;
}
- csa_ie->chandef = new_vht_chandef;
+
+ csa_ie->chanreq.oper = new_chandef;
}
if (elems->max_channel_switch_time)
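/*
 * Sketch of the capability clamp above: widths beyond what the
 * connection allows are stepped down before the compatibility check.
 * Only the widths relevant here are modelled; the real code also
 * treats 80+80 like 160 and downgrades one step at a time.
 */
enum bw { BW40 = 40, BW80 = 80, BW160 = 160, BW320 = 320 };

static enum bw clamp_width(enum bw width, enum bw limit)
{
	if (limit < BW320 && width == BW320)
		width = BW160;		/* one downgrade step */
	if (limit < BW160 && width == BW160)
		width = BW80;
	return width;
}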
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 4391d8dd634b..da5fdd6f5c85 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -1566,7 +1566,8 @@ void sta_info_stop(struct ieee80211_local *local)
}
-int __sta_info_flush(struct ieee80211_sub_if_data *sdata, bool vlans)
+int __sta_info_flush(struct ieee80211_sub_if_data *sdata, bool vlans,
+ int link_id)
{
struct ieee80211_local *local = sdata->local;
struct sta_info *sta, *tmp;
@@ -1580,12 +1581,18 @@ int __sta_info_flush(struct ieee80211_sub_if_data *sdata, bool vlans)
WARN_ON(vlans && !sdata->bss);
list_for_each_entry_safe(sta, tmp, &local->sta_list, list) {
- if (sdata == sta->sdata ||
- (vlans && sdata->bss == sta->sdata->bss)) {
- if (!WARN_ON(__sta_info_destroy_part1(sta)))
- list_add(&sta->free_list, &free_list);
- ret++;
- }
+ if (sdata != sta->sdata &&
+ (!vlans || sdata->bss != sta->sdata->bss))
+ continue;
+
+ if (link_id >= 0 && sta->sta.valid_links &&
+ !(sta->sta.valid_links & BIT(link_id)))
+ continue;
+
+ if (!WARN_ON(__sta_info_destroy_part1(sta)))
+ list_add(&sta->free_list, &free_list);
+
+ ret++;
}
if (!list_empty(&free_list)) {
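/*
 * The new link filter above in miniature: non-MLD stations
 * (valid_links == 0) are always flushed, MLD stations only when they
 * actually use the requested link; link_id < 0 flushes everything.
 */
#include <stdbool.h>

static bool sta_matches_link(unsigned long valid_links, int link_id)
{
	if (link_id < 0)
		return true;			/* flush all */
	if (!valid_links)
		return true;			/* non-MLD STA */
	return valid_links & (1UL << link_id);	/* MLD: must use the link */
}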
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 5ef1554f991f..a52fb76386d0 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -3,7 +3,7 @@
* Copyright 2002-2005, Devicescape Software, Inc.
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright(c) 2015-2017 Intel Deutschland GmbH
- * Copyright(c) 2020-2023 Intel Corporation
+ * Copyright(c) 2020-2024 Intel Corporation
*/
#ifndef STA_INFO_H
@@ -482,6 +482,8 @@ struct ieee80211_fragment_cache {
* same for non-MLD STA. This is used as key for searching link STA
* @link_id: Link ID uniquely identifying the link STA. This is 0 for non-MLD
* and set to the corresponding vif LinkId for MLD STA
+ * @op_mode_nss: NSS limit as set by operating mode notification, or 0
+ * @capa_nss: NSS limit as determined by local and peer capabilities
* @link_hash_node: hash node for rhashtable
* @sta: Points to the STA info
* @gtk: group keys negotiated with this station, if any
@@ -518,6 +520,8 @@ struct link_sta_info {
u8 addr[ETH_ALEN];
u8 link_id;
+ u8 op_mode_nss, capa_nss;
+
struct rhlist_head link_hash_node;
struct sta_info *sta;
@@ -886,8 +890,12 @@ void sta_info_stop(struct ieee80211_local *local);
*
* @sdata: sdata to remove all stations from
* @vlans: if the given interface is an AP interface, also flush VLANs
+ * @link_id: if given (>= 0), only STA entries that use @link_id
+ * will be removed; if -1 is passed, all STA entries will be
+ * removed.
*/
-int __sta_info_flush(struct ieee80211_sub_if_data *sdata, bool vlans);
+int __sta_info_flush(struct ieee80211_sub_if_data *sdata, bool vlans,
+ int link_id);
/**
* sta_info_flush - flush matching STA entries from the STA table
@@ -895,10 +903,14 @@ int __sta_info_flush(struct ieee80211_sub_if_data *sdata, bool vlans);
* Returns the number of removed STA entries.
*
* @sdata: sdata to remove all stations from
+ * @link_id: if given (>= 0), only STA entries that use @link_id
+ * will be removed; if -1 is passed, all STA entries will be
+ * removed.
*/
-static inline int sta_info_flush(struct ieee80211_sub_if_data *sdata)
+static inline int sta_info_flush(struct ieee80211_sub_if_data *sdata,
+ int link_id)
{
- return __sta_info_flush(sdata, false);
+ return __sta_info_flush(sdata, false, link_id);
}
void sta_set_rate_info_tx(struct sta_info *sta,
diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c
index 49730b424141..f07b40916485 100644
--- a/net/mac80211/tdls.c
+++ b/net/mac80211/tdls.c
@@ -6,7 +6,7 @@
* Copyright 2014, Intel Corporation
* Copyright 2014 Intel Mobile Communications GmbH
* Copyright 2015 - 2016 Intel Deutschland GmbH
- * Copyright (C) 2019, 2021-2023 Intel Corporation
+ * Copyright (C) 2019, 2021-2024 Intel Corporation
*/
#include <linux/ieee80211.h>
@@ -159,7 +159,7 @@ static void ieee80211_tdls_add_oper_classes(struct ieee80211_link_data *link,
u8 *pos;
u8 op_class;
- if (!ieee80211_chandef_to_operating_class(&link->conf->chandef,
+ if (!ieee80211_chandef_to_operating_class(&link->conf->chanreq.oper,
&op_class))
return;
@@ -347,7 +347,7 @@ ieee80211_tdls_chandef_vht_upgrade(struct ieee80211_sub_if_data *sdata,
(uc.width > sta->tdls_chandef.width &&
!cfg80211_reg_can_beacon_relax(sdata->local->hw.wiphy, &uc,
sdata->wdev.iftype)))
- ieee80211_chandef_downgrade(&uc);
+ ieee80211_chandef_downgrade(&uc, NULL);
if (!cfg80211_chandef_identical(&uc, &sta->tdls_chandef)) {
tdls_dbg(sdata, "TDLS ch width upgraded %d -> %d\n",
@@ -382,8 +382,8 @@ ieee80211_tdls_add_setup_start_ies(struct ieee80211_link_data *link,
if (WARN_ON_ONCE(!sband))
return;
- ieee80211_add_srates_ie(sdata, skb, false, sband->band);
- ieee80211_add_ext_srates_ie(sdata, skb, false, sband->band);
+ ieee80211_put_srates_elem(skb, sband, 0, 0, 0, WLAN_EID_SUPP_RATES);
+ ieee80211_put_srates_elem(skb, sband, 0, 0, 0, WLAN_EID_EXT_SUPP_RATES);
ieee80211_tdls_add_supp_channels(sdata, skb);
/* add any custom IEs that go before Extended Capabilities */
@@ -438,7 +438,7 @@ ieee80211_tdls_add_setup_start_ies(struct ieee80211_link_data *link,
if (WARN_ON_ONCE(!sta))
return;
- sta->tdls_chandef = link->conf->chandef;
+ sta->tdls_chandef = link->conf->chanreq.oper;
}
ieee80211_tdls_add_oper_classes(link, skb);
@@ -548,30 +548,14 @@ ieee80211_tdls_add_setup_start_ies(struct ieee80211_link_data *link,
}
/* build the HE-cap from sband */
- if (he_cap &&
- (action_code == WLAN_TDLS_SETUP_REQUEST ||
- action_code == WLAN_TDLS_SETUP_RESPONSE ||
- action_code == WLAN_PUB_ACTION_TDLS_DISCOVER_RES)) {
- __le16 he_6ghz_capa;
- u8 cap_size;
-
- cap_size =
- 2 + 1 + sizeof(he_cap->he_cap_elem) +
- ieee80211_he_mcs_nss_size(&he_cap->he_cap_elem) +
- ieee80211_he_ppe_size(he_cap->ppe_thres[0],
- he_cap->he_cap_elem.phy_cap_info);
- pos = skb_put(skb, cap_size);
- pos = ieee80211_ie_build_he_cap(0, pos, he_cap, pos + cap_size);
+ if (action_code == WLAN_TDLS_SETUP_REQUEST ||
+ action_code == WLAN_TDLS_SETUP_RESPONSE ||
+ action_code == WLAN_PUB_ACTION_TDLS_DISCOVER_RES) {
+ ieee80211_put_he_cap(skb, sdata, sband, NULL);
/* Build the HE 6 GHz capability element from sband */
- if (sband->band == NL80211_BAND_6GHZ) {
- cap_size = 2 + 1 + sizeof(struct ieee80211_he_6ghz_capa);
- pos = skb_put(skb, cap_size);
- he_6ghz_capa =
- ieee80211_get_he_6ghz_capa_vif(sband, &sdata->vif);
- pos = ieee80211_write_he_6ghz_cap(pos, he_6ghz_capa,
- pos + cap_size);
- }
+ if (sband->band == NL80211_BAND_6GHZ)
+ ieee80211_put_he_6ghz_cap(skb, sdata, link->smps_mode);
}
/* add any custom IEs that go before EHT capabilities */
@@ -591,21 +575,10 @@ ieee80211_tdls_add_setup_start_ies(struct ieee80211_link_data *link,
}
/* build the EHT-cap from sband */
- if (he_cap && eht_cap &&
- (action_code == WLAN_TDLS_SETUP_REQUEST ||
- action_code == WLAN_TDLS_SETUP_RESPONSE ||
- action_code == WLAN_PUB_ACTION_TDLS_DISCOVER_RES)) {
- u8 cap_size;
-
- cap_size =
- 2 + 1 + sizeof(eht_cap->eht_cap_elem) +
- ieee80211_eht_mcs_nss_size(&he_cap->he_cap_elem,
- &eht_cap->eht_cap_elem, false) +
- ieee80211_eht_ppe_size(eht_cap->eht_ppe_thres[0],
- eht_cap->eht_cap_elem.phy_cap_info);
- pos = skb_put(skb, cap_size);
- ieee80211_ie_build_eht_cap(pos, he_cap, eht_cap, pos + cap_size, false);
- }
+ if (action_code == WLAN_TDLS_SETUP_REQUEST ||
+ action_code == WLAN_TDLS_SETUP_RESPONSE ||
+ action_code == WLAN_PUB_ACTION_TDLS_DISCOVER_RES)
+ ieee80211_put_eht_cap(skb, sdata, sband, NULL);
/* add any remaining IEs */
if (extra_ies_len) {
@@ -638,7 +611,7 @@ ieee80211_tdls_add_setup_cfm_ies(struct ieee80211_link_data *link,
if (WARN_ON_ONCE(!sta || !ap_sta))
return;
- sta->tdls_chandef = link->conf->chandef;
+ sta->tdls_chandef = link->conf->chanreq.oper;
/* add any custom IEs that go before the QoS IE */
if (extra_ies_len) {
@@ -684,7 +657,7 @@ ieee80211_tdls_add_setup_cfm_ies(struct ieee80211_link_data *link,
pos = skb_put(skb, 2 + sizeof(struct ieee80211_ht_operation));
ieee80211_ie_build_ht_oper(pos, &sta->sta.deflink.ht_cap,
- &link->conf->chandef, prot,
+ &link->conf->chanreq.oper, prot,
true);
}
@@ -1413,8 +1386,8 @@ iee80211_tdls_recalc_ht_protection(struct ieee80211_sub_if_data *sdata,
IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT;
u16 opmode;
- /* Nothing to do if the BSS connection uses HT */
- if (!(sdata->deflink.u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HT))
+ /* Nothing to do if the BSS connection uses (at least) HT */
+ if (sdata->deflink.u.mgd.conn.mode >= IEEE80211_CONN_MODE_HT)
return;
tdls_ht = (sta && sta->sta.deflink.ht_cap.ht_supported) ||
@@ -2055,8 +2028,9 @@ ieee80211_process_tdls_channel_switch(struct ieee80211_sub_if_data *sdata,
}
}
-void ieee80211_teardown_tdls_peers(struct ieee80211_sub_if_data *sdata)
+void ieee80211_teardown_tdls_peers(struct ieee80211_link_data *link)
{
+ struct ieee80211_sub_if_data *sdata = link->sdata;
struct sta_info *sta;
u16 reason = WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED;
@@ -2066,6 +2040,9 @@ void ieee80211_teardown_tdls_peers(struct ieee80211_sub_if_data *sdata)
!test_sta_flag(sta, WLAN_STA_AUTHORIZED))
continue;
+ if (sta->deflink.link_id != link->link_id)
+ continue;
+
ieee80211_tdls_oper_request(&sdata->vif, sta->sta.addr,
NL80211_TDLS_TEARDOWN, reason,
GFP_ATOMIC);
diff --git a/net/mac80211/tests/elems.c b/net/mac80211/tests/elems.c
index 997d0cd27b2d..a413ba29f759 100644
--- a/net/mac80211/tests/elems.c
+++ b/net/mac80211/tests/elems.c
@@ -2,7 +2,7 @@
/*
* KUnit tests for element parsing
*
- * Copyright (C) 2023 Intel Corporation
+ * Copyright (C) 2023-2024 Intel Corporation
*/
#include <kunit/test.h>
#include "../ieee80211_i.h"
@@ -14,6 +14,7 @@ static void mle_defrag(struct kunit *test)
struct ieee80211_elems_parse_params parse_params = {
.link_id = 12,
.from_ap = true,
+ .mode = IEEE80211_CONN_MODE_EHT,
};
struct ieee802_11_elems *parsed;
struct sk_buff *skb;
@@ -68,7 +69,7 @@ static void mle_defrag(struct kunit *test)
if (IS_ERR_OR_NULL(parsed))
goto free_skb;
- KUNIT_EXPECT_NOT_NULL(test, parsed->ml_basic_elem);
+ KUNIT_EXPECT_NOT_NULL(test, parsed->ml_basic);
KUNIT_EXPECT_EQ(test,
parsed->ml_basic_len,
2 /* control */ +
diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h
index 06835ed4c44f..8e758b5074bd 100644
--- a/net/mac80211/trace.h
+++ b/net/mac80211/trace.h
@@ -2,7 +2,7 @@
/*
* Portions of this file
* Copyright(c) 2016-2017 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2023 Intel Corporation
+ * Copyright (C) 2018 - 2024 Intel Corporation
*/
#if !defined(__MAC80211_DRIVER_TRACE) || defined(TRACE_HEADER_MULTI_READ)
@@ -50,7 +50,7 @@
__entry->center_freq1 = (c) ? (c)->center_freq1 : 0; \
__entry->freq1_offset = (c) ? (c)->freq1_offset : 0; \
__entry->center_freq2 = (c) ? (c)->center_freq2 : 0;
-#define CHANDEF_PR_FMT " control:%d.%03d MHz width:%d center: %d.%03d/%d MHz"
+#define CHANDEF_PR_FMT " chandef(%d.%03d MHz,width:%d,center: %d.%03d/%d MHz)"
#define CHANDEF_PR_ARG __entry->control_freq, __entry->freq_offset, __entry->chan_width, \
__entry->center_freq1, __entry->freq1_offset, __entry->center_freq2
@@ -69,22 +69,45 @@
__entry->min_center_freq1 = (c)->center_freq1; \
__entry->min_freq1_offset = (c)->freq1_offset; \
__entry->min_center_freq2 = (c)->center_freq2;
-#define MIN_CHANDEF_PR_FMT " min_control:%d.%03d MHz min_width:%d min_center: %d.%03d/%d MHz"
+#define MIN_CHANDEF_PR_FMT " mindef(%d.%03d MHz,width:%d,center: %d.%03d/%d MHz)"
#define MIN_CHANDEF_PR_ARG __entry->min_control_freq, __entry->min_freq_offset, \
__entry->min_chan_width, \
__entry->min_center_freq1, __entry->min_freq1_offset, \
__entry->min_center_freq2
+#define AP_CHANDEF_ENTRY \
+ __field(u32, ap_control_freq) \
+ __field(u32, ap_freq_offset) \
+ __field(u32, ap_chan_width) \
+ __field(u32, ap_center_freq1) \
+ __field(u32, ap_freq1_offset) \
+ __field(u32, ap_center_freq2)
+
+#define AP_CHANDEF_ASSIGN(c) \
+ __entry->ap_control_freq = (c)->chan ? (c)->chan->center_freq : 0;\
+ __entry->ap_freq_offset = (c)->chan ? (c)->chan->freq_offset : 0;\
+ __entry->ap_chan_width = (c)->chan ? (c)->width : 0; \
+ __entry->ap_center_freq1 = (c)->chan ? (c)->center_freq1 : 0; \
+ __entry->ap_freq1_offset = (c)->chan ? (c)->freq1_offset : 0; \
+ __entry->ap_center_freq2 = (c)->chan ? (c)->center_freq2 : 0;
+#define AP_CHANDEF_PR_FMT " ap(%d.%03d MHz,width:%d,center: %d.%03d/%d MHz)"
+#define AP_CHANDEF_PR_ARG __entry->ap_control_freq, __entry->ap_freq_offset, \
+ __entry->ap_chan_width, \
+ __entry->ap_center_freq1, __entry->ap_freq1_offset, \
+ __entry->ap_center_freq2
+
#define CHANCTX_ENTRY CHANDEF_ENTRY \
MIN_CHANDEF_ENTRY \
+ AP_CHANDEF_ENTRY \
__field(u8, rx_chains_static) \
__field(u8, rx_chains_dynamic)
#define CHANCTX_ASSIGN CHANDEF_ASSIGN(&ctx->conf.def) \
MIN_CHANDEF_ASSIGN(&ctx->conf.min_def) \
+ AP_CHANDEF_ASSIGN(&ctx->conf.ap) \
__entry->rx_chains_static = ctx->conf.rx_chains_static; \
__entry->rx_chains_dynamic = ctx->conf.rx_chains_dynamic
-#define CHANCTX_PR_FMT CHANDEF_PR_FMT MIN_CHANDEF_PR_FMT " chains:%d/%d"
-#define CHANCTX_PR_ARG CHANDEF_PR_ARG, MIN_CHANDEF_PR_ARG, \
+#define CHANCTX_PR_FMT CHANDEF_PR_FMT MIN_CHANDEF_PR_FMT AP_CHANDEF_PR_FMT " chains:%d/%d"
+#define CHANCTX_PR_ARG CHANDEF_PR_ARG, MIN_CHANDEF_PR_ARG, AP_CHANDEF_PR_ARG, \
__entry->rx_chains_static, __entry->rx_chains_dynamic
#define KEY_ENTRY __field(u32, cipher) \
@@ -503,9 +526,9 @@ TRACE_EVENT(drv_link_info_changed,
__entry->ht_operation_mode = link_conf->ht_operation_mode;
__entry->cqm_rssi_thold = link_conf->cqm_rssi_thold;
__entry->cqm_rssi_hyst = link_conf->cqm_rssi_hyst;
- __entry->channel_width = link_conf->chandef.width;
- __entry->channel_cfreq1 = link_conf->chandef.center_freq1;
- __entry->channel_cfreq1_offset = link_conf->chandef.freq1_offset;
+ __entry->channel_width = link_conf->chanreq.oper.width;
+ __entry->channel_cfreq1 = link_conf->chanreq.oper.center_freq1;
+ __entry->channel_cfreq1_offset = link_conf->chanreq.oper.freq1_offset;
__entry->qos = link_conf->qos;
__entry->hidden_ssid = link_conf->hidden_ssid;
__entry->txpower = link_conf->txpower;
@@ -1186,7 +1209,7 @@ DEFINE_EVENT(sta_event, drv_flush_sta,
TP_ARGS(local, sdata, sta)
);
-TRACE_EVENT(drv_channel_switch,
+DECLARE_EVENT_CLASS(chanswitch_evt,
TP_PROTO(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
struct ieee80211_channel_switch *ch_switch),
@@ -1201,6 +1224,7 @@ TRACE_EVENT(drv_channel_switch,
__field(u32, device_timestamp)
__field(bool, block_tx)
__field(u8, count)
+ __field(u8, link_id)
),
TP_fast_assign(
@@ -1211,14 +1235,24 @@ TRACE_EVENT(drv_channel_switch,
__entry->device_timestamp = ch_switch->device_timestamp;
__entry->block_tx = ch_switch->block_tx;
__entry->count = ch_switch->count;
+ __entry->link_id = ch_switch->link_id;
),
TP_printk(
- LOCAL_PR_FMT VIF_PR_FMT " new " CHANDEF_PR_FMT " count:%d",
- LOCAL_PR_ARG, VIF_PR_ARG, CHANDEF_PR_ARG, __entry->count
+ LOCAL_PR_FMT VIF_PR_FMT CHANDEF_PR_FMT " count:%d block_tx:%d timestamp:%llu device_ts:%u link_id:%d",
+ LOCAL_PR_ARG, VIF_PR_ARG, CHANDEF_PR_ARG, __entry->count,
+ __entry->block_tx, __entry->timestamp,
+ __entry->device_timestamp, __entry->link_id
)
);
+DEFINE_EVENT(chanswitch_evt, drv_channel_switch,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_channel_switch *ch_switch),
+ TP_ARGS(local, sdata, ch_switch)
+);
+
TRACE_EVENT(drv_set_antenna,
TP_PROTO(struct ieee80211_local *local, u32 tx_ant, u32 rx_ant, int ret),
@@ -2098,39 +2132,11 @@ TRACE_EVENT(drv_channel_switch_beacon,
)
);
-TRACE_EVENT(drv_pre_channel_switch,
+DEFINE_EVENT(chanswitch_evt, drv_pre_channel_switch,
TP_PROTO(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
struct ieee80211_channel_switch *ch_switch),
-
- TP_ARGS(local, sdata, ch_switch),
-
- TP_STRUCT__entry(
- LOCAL_ENTRY
- VIF_ENTRY
- CHANDEF_ENTRY
- __field(u64, timestamp)
- __field(u32, device_timestamp)
- __field(bool, block_tx)
- __field(u8, count)
- ),
-
- TP_fast_assign(
- LOCAL_ASSIGN;
- VIF_ASSIGN;
- CHANDEF_ASSIGN(&ch_switch->chandef)
- __entry->timestamp = ch_switch->timestamp;
- __entry->device_timestamp = ch_switch->device_timestamp;
- __entry->block_tx = ch_switch->block_tx;
- __entry->count = ch_switch->count;
- ),
-
- TP_printk(
- LOCAL_PR_FMT VIF_PR_FMT " prepare channel switch to "
- CHANDEF_PR_FMT " count:%d block_tx:%d timestamp:%llu",
- LOCAL_PR_ARG, VIF_PR_ARG, CHANDEF_PR_ARG, __entry->count,
- __entry->block_tx, __entry->timestamp
- )
+ TP_ARGS(local, sdata, ch_switch)
);
DEFINE_EVENT(local_sdata_evt, drv_post_channel_switch,
@@ -2145,40 +2151,11 @@ DEFINE_EVENT(local_sdata_evt, drv_abort_channel_switch,
TP_ARGS(local, sdata)
);
-TRACE_EVENT(drv_channel_switch_rx_beacon,
+DEFINE_EVENT(chanswitch_evt, drv_channel_switch_rx_beacon,
TP_PROTO(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
struct ieee80211_channel_switch *ch_switch),
-
- TP_ARGS(local, sdata, ch_switch),
-
- TP_STRUCT__entry(
- LOCAL_ENTRY
- VIF_ENTRY
- CHANDEF_ENTRY
- __field(u64, timestamp)
- __field(u32, device_timestamp)
- __field(bool, block_tx)
- __field(u8, count)
- ),
-
- TP_fast_assign(
- LOCAL_ASSIGN;
- VIF_ASSIGN;
- CHANDEF_ASSIGN(&ch_switch->chandef)
- __entry->timestamp = ch_switch->timestamp;
- __entry->device_timestamp = ch_switch->device_timestamp;
- __entry->block_tx = ch_switch->block_tx;
- __entry->count = ch_switch->count;
- ),
-
- TP_printk(
- LOCAL_PR_FMT VIF_PR_FMT
- " received a channel switch beacon to "
- CHANDEF_PR_FMT " count:%d block_tx:%d timestamp:%llu",
- LOCAL_PR_ARG, VIF_PR_ARG, CHANDEF_PR_ARG, __entry->count,
- __entry->block_tx, __entry->timestamp
- )
+ TP_ARGS(local, sdata, ch_switch)
);
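/*
 * The refactor above follows the standard tracepoint pattern: one
 * DECLARE_EVENT_CLASS() carries the TP_STRUCT__entry/TP_fast_assign/
 * TP_printk body, and each DEFINE_EVENT() stamps out a named event
 * that shares it. A minimal sketch of the shape (not a buildable
 * header on its own -- the usual TRACE_SYSTEM/TRACE_INCLUDE
 * boilerplate is omitted):
 */
DECLARE_EVENT_CLASS(example_evt,
	TP_PROTO(int value),
	TP_ARGS(value),
	TP_STRUCT__entry(__field(int, value)),
	TP_fast_assign(__entry->value = value;),
	TP_printk("value:%d", __entry->value)
);

DEFINE_EVENT(example_evt, example_event_a,
	TP_PROTO(int value),
	TP_ARGS(value)
);

DEFINE_EVENT(example_evt, example_event_b,
	TP_PROTO(int value),
	TP_ARGS(value)
);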
TRACE_EVENT(drv_get_txpower,
@@ -3035,6 +3012,34 @@ TRACE_EVENT(api_radar_detected,
)
);
+TRACE_EVENT(api_request_smps,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_link_data *link,
+ enum ieee80211_smps_mode smps_mode),
+
+ TP_ARGS(local, sdata, link, smps_mode),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ VIF_ENTRY
+ __field(int, link_id)
+ __field(u32, smps_mode)
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ VIF_ASSIGN;
+ __entry->link_id = link->link_id;
+ __entry->smps_mode = smps_mode;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT " " VIF_PR_FMT " link:%d, smps_mode:%d",
+ LOCAL_PR_ARG, VIF_PR_ARG, __entry->link_id, __entry->smps_mode
+ )
+);
+
/*
* Tracing for internal functions
* (which may also be called in response to driver calls)
@@ -3088,6 +3093,58 @@ TRACE_EVENT(stop_queue,
)
);
+TRACE_EVENT(drv_can_neg_ttlm,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_neg_ttlm *neg_ttlm),
+
+ TP_ARGS(local, sdata, neg_ttlm),
+
+ TP_STRUCT__entry(LOCAL_ENTRY
+ VIF_ENTRY
+ __array(u16, downlink, sizeof(u16) * 8)
+ __array(u16, uplink, sizeof(u16) * 8)
+ ),
+
+ TP_fast_assign(LOCAL_ASSIGN;
+ VIF_ASSIGN;
+ memcpy(__entry->downlink, neg_ttlm->downlink,
+ sizeof(neg_ttlm->downlink));
+ memcpy(__entry->uplink, neg_ttlm->uplink,
+ sizeof(neg_ttlm->uplink));
+ ),
+
+ TP_printk(LOCAL_PR_FMT ", " VIF_PR_FMT, LOCAL_PR_ARG, VIF_PR_ARG)
+);
+
+TRACE_EVENT(drv_neg_ttlm_res,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ enum ieee80211_neg_ttlm_res res,
+ struct ieee80211_neg_ttlm *neg_ttlm),
+
+ TP_ARGS(local, sdata, res, neg_ttlm),
+
+ TP_STRUCT__entry(LOCAL_ENTRY
+ VIF_ENTRY
+ __field(u32, res)
+ __array(u16, downlink, sizeof(u16) * 8)
+ __array(u16, uplink, sizeof(u16) * 8)
+ ),
+
+ TP_fast_assign(LOCAL_ASSIGN;
+ VIF_ASSIGN;
+ __entry->res = res;
+ memcpy(__entry->downlink, neg_ttlm->downlink,
+ sizeof(neg_ttlm->downlink));
+ memcpy(__entry->uplink, neg_ttlm->uplink,
+ sizeof(neg_ttlm->uplink));
+ ),
+
+ TP_printk(LOCAL_PR_FMT VIF_PR_FMT " response: %d",
+ LOCAL_PR_ARG, VIF_PR_ARG, __entry->res
+ )
+);
#endif /* !__MAC80211_DRIVER_TRACE || TRACE_HEADER_MULTI_READ */
#undef TRACE_INCLUDE_PATH
diff --git a/net/mac80211/trace_msg.h b/net/mac80211/trace_msg.h
index c9dbe9aab7bd..aea4ce55c5ac 100644
--- a/net/mac80211/trace_msg.h
+++ b/net/mac80211/trace_msg.h
@@ -16,8 +16,6 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM mac80211_msg
-#define MAX_MSG_LEN 120
-
DECLARE_EVENT_CLASS(mac80211_msg_event,
TP_PROTO(struct va_format *vaf),
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 6fbb15b65902..6bf223e6cd1a 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -133,6 +133,7 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx,
mrate = sband->bitrates[0].bitrate;
for (i = 0; i < sband->n_bitrates; i++) {
struct ieee80211_rate *r = &sband->bitrates[i];
+ u32 flag;
if (r->bitrate > txrate->bitrate)
break;
@@ -145,28 +146,24 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx,
switch (sband->band) {
case NL80211_BAND_2GHZ:
- case NL80211_BAND_LC: {
- u32 flag;
+ case NL80211_BAND_LC:
if (tx->sdata->deflink.operating_11g_mode)
flag = IEEE80211_RATE_MANDATORY_G;
else
flag = IEEE80211_RATE_MANDATORY_B;
- if (r->flags & flag)
- mrate = r->bitrate;
break;
- }
case NL80211_BAND_5GHZ:
case NL80211_BAND_6GHZ:
- if (r->flags & IEEE80211_RATE_MANDATORY_A)
- mrate = r->bitrate;
+ flag = IEEE80211_RATE_MANDATORY_A;
break;
- case NL80211_BAND_S1GHZ:
- case NL80211_BAND_60GHZ:
- /* TODO, for now fall through */
- case NUM_NL80211_BANDS:
+ default:
+ flag = 0;
WARN_ON(1);
break;
}
+
+ if (r->flags & flag)
+ mrate = r->bitrate;
}
if (rate == -1) {
/* No matching basic rate found; use highest suitable mandatory
@@ -2393,12 +2390,18 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
if (chanctx_conf)
chandef = &chanctx_conf->def;
- else if (!local->use_chanctx)
- chandef = &local->_oper_chandef;
else
goto fail_rcu;
/*
+ * If the driver/HW supports IEEE80211_CHAN_CAN_MONITOR, we still
+ * shouldn't transmit on disabled channels.
+ */
+ if (!cfg80211_chandef_usable(local->hw.wiphy, chandef,
+ IEEE80211_CHAN_DISABLED))
+ goto fail_rcu;
+
+ /*
* Frame injection is not allowed if beaconing is not allowed
* or if we need radar detection. Beaconing is usually not allowed when
* the mode or operation (Adhoc, AP, Mesh) does not support DFS.
@@ -3959,7 +3962,8 @@ begin:
ieee80211_free_txskb(&local->hw, skb);
goto begin;
} else {
- vif = NULL;
+ info->control.vif = NULL;
+ return skb;
}
break;
case NL80211_IFTYPE_AP_VLAN:
@@ -5032,16 +5036,24 @@ static u8 __ieee80211_beacon_update_cntdwn(struct beacon_data *beacon)
return beacon->cntdwn_current_counter;
}
-u8 ieee80211_beacon_update_cntdwn(struct ieee80211_vif *vif)
+u8 ieee80211_beacon_update_cntdwn(struct ieee80211_vif *vif, unsigned int link_id)
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+ struct ieee80211_link_data *link;
struct beacon_data *beacon = NULL;
u8 count = 0;
+ if (WARN_ON(link_id >= IEEE80211_MLD_MAX_NUM_LINKS))
+ return 0;
+
rcu_read_lock();
+ link = rcu_dereference(sdata->link[link_id]);
+ if (!link)
+ goto unlock;
+
if (sdata->vif.type == NL80211_IFTYPE_AP)
- beacon = rcu_dereference(sdata->deflink.u.ap.beacon);
+ beacon = rcu_dereference(link->u.ap.beacon);
else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
beacon = rcu_dereference(sdata->u.ibss.presp);
else if (ieee80211_vif_is_mesh(&sdata->vif))
@@ -5083,9 +5095,11 @@ unlock:
}
EXPORT_SYMBOL(ieee80211_beacon_set_cntdwn);
-bool ieee80211_beacon_cntdwn_is_complete(struct ieee80211_vif *vif)
+bool ieee80211_beacon_cntdwn_is_complete(struct ieee80211_vif *vif,
+ unsigned int link_id)
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+ struct ieee80211_link_data *link;
struct beacon_data *beacon = NULL;
u8 *beacon_data;
size_t beacon_data_len;
@@ -5094,9 +5108,17 @@ bool ieee80211_beacon_cntdwn_is_complete(struct ieee80211_vif *vif)
if (!ieee80211_sdata_running(sdata))
return false;
+ if (WARN_ON(link_id >= IEEE80211_MLD_MAX_NUM_LINKS))
+ return false;
+
rcu_read_lock();
+
+ link = rcu_dereference(sdata->link[link_id]);
+ if (!link)
+ goto out;
+
if (vif->type == NL80211_IFTYPE_AP) {
- beacon = rcu_dereference(sdata->deflink.u.ap.beacon);
+ beacon = rcu_dereference(link->u.ap.beacon);
if (WARN_ON(!beacon || !beacon->tail))
goto out;
beacon_data = beacon->tail;
@@ -5282,7 +5304,7 @@ ieee80211_beacon_get_ap(struct ieee80211_hw *hw,
if (beacon->cntdwn_counter_offsets[0]) {
if (!is_template)
- ieee80211_beacon_update_cntdwn(vif);
+ ieee80211_beacon_update_cntdwn(vif, link->link_id);
ieee80211_set_beacon_cntdwn(sdata, beacon, link);
}
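/*
 * The per-link lookup introduced above follows the usual RCU pattern:
 * validate the index, take the read lock, rcu_dereference() the link
 * slot, and bail if it is gone. Sketch with stand-in names; the real
 * code additionally distinguishes AP/IBSS/mesh beacon sources.
 */
u8 example_update_cntdwn(struct ieee80211_sub_if_data *sdata,
			 unsigned int link_id)
{
	struct ieee80211_link_data *link;
	u8 count = 0;

	if (WARN_ON(link_id >= IEEE80211_MLD_MAX_NUM_LINKS))
		return 0;

	rcu_read_lock();
	link = rcu_dereference(sdata->link[link_id]);
	if (link) {
		/* ... use link->u.ap.beacon under RCU ... */
	}
	rcu_read_unlock();
	return count;
}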
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 643c54855be6..a237cbcf7b49 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -6,7 +6,7 @@
* Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*
* utilities for mac80211
*/
@@ -46,6 +46,11 @@ struct ieee80211_hw *wiphy_to_ieee80211_hw(struct wiphy *wiphy)
}
EXPORT_SYMBOL(wiphy_to_ieee80211_hw);
+const struct ieee80211_conn_settings ieee80211_conn_settings_unlimited = {
+ .mode = IEEE80211_CONN_MODE_EHT,
+ .bw_limit = IEEE80211_CONN_BW_LIMIT_320,
+};
+
u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len,
enum nl80211_iftype type)
{
@@ -912,776 +917,6 @@ void ieee80211_queue_delayed_work(struct ieee80211_hw *hw,
}
EXPORT_SYMBOL(ieee80211_queue_delayed_work);
-static void
-ieee80211_parse_extension_element(u32 *crc,
- const struct element *elem,
- struct ieee802_11_elems *elems,
- struct ieee80211_elems_parse_params *params)
-{
- const void *data = elem->data + 1;
- bool calc_crc = false;
- u8 len;
-
- if (!elem->datalen)
- return;
-
- len = elem->datalen - 1;
-
- switch (elem->data[0]) {
- case WLAN_EID_EXT_HE_MU_EDCA:
- calc_crc = true;
- if (len >= sizeof(*elems->mu_edca_param_set))
- elems->mu_edca_param_set = data;
- break;
- case WLAN_EID_EXT_HE_CAPABILITY:
- if (ieee80211_he_capa_size_ok(data, len)) {
- elems->he_cap = data;
- elems->he_cap_len = len;
- }
- break;
- case WLAN_EID_EXT_HE_OPERATION:
- calc_crc = true;
- if (len >= sizeof(*elems->he_operation) &&
- len >= ieee80211_he_oper_size(data) - 1)
- elems->he_operation = data;
- break;
- case WLAN_EID_EXT_UORA:
- if (len >= 1)
- elems->uora_element = data;
- break;
- case WLAN_EID_EXT_MAX_CHANNEL_SWITCH_TIME:
- if (len == 3)
- elems->max_channel_switch_time = data;
- break;
- case WLAN_EID_EXT_MULTIPLE_BSSID_CONFIGURATION:
- if (len >= sizeof(*elems->mbssid_config_ie))
- elems->mbssid_config_ie = data;
- break;
- case WLAN_EID_EXT_HE_SPR:
- if (len >= sizeof(*elems->he_spr) &&
- len >= ieee80211_he_spr_size(data))
- elems->he_spr = data;
- break;
- case WLAN_EID_EXT_HE_6GHZ_CAPA:
- if (len >= sizeof(*elems->he_6ghz_capa))
- elems->he_6ghz_capa = data;
- break;
- case WLAN_EID_EXT_EHT_CAPABILITY:
- if (ieee80211_eht_capa_size_ok(elems->he_cap,
- data, len,
- params->from_ap)) {
- elems->eht_cap = data;
- elems->eht_cap_len = len;
- }
- break;
- case WLAN_EID_EXT_EHT_OPERATION:
- if (ieee80211_eht_oper_size_ok(data, len))
- elems->eht_operation = data;
- calc_crc = true;
- break;
- case WLAN_EID_EXT_EHT_MULTI_LINK:
- calc_crc = true;
-
- if (ieee80211_mle_size_ok(data, len)) {
- const struct ieee80211_multi_link_elem *mle =
- (void *)data;
-
- switch (le16_get_bits(mle->control,
- IEEE80211_ML_CONTROL_TYPE)) {
- case IEEE80211_ML_CONTROL_TYPE_BASIC:
- elems->ml_basic_elem = (void *)elem;
- elems->ml_basic = data;
- elems->ml_basic_len = len;
- break;
- case IEEE80211_ML_CONTROL_TYPE_RECONF:
- elems->ml_reconf_elem = (void *)elem;
- elems->ml_reconf = data;
- elems->ml_reconf_len = len;
- break;
- default:
- break;
- }
- }
- break;
- case WLAN_EID_EXT_BANDWIDTH_INDICATION:
- if (ieee80211_bandwidth_indication_size_ok(data, len))
- elems->bandwidth_indication = data;
- calc_crc = true;
- break;
- case WLAN_EID_EXT_TID_TO_LINK_MAPPING:
- calc_crc = true;
- if (ieee80211_tid_to_link_map_size_ok(data, len) &&
- elems->ttlm_num < ARRAY_SIZE(elems->ttlm)) {
- elems->ttlm[elems->ttlm_num] = (void *)data;
- elems->ttlm_num++;
- }
- break;
- }
-
- if (crc && calc_crc)
- *crc = crc32_be(*crc, (void *)elem, elem->datalen + 2);
-}
-
-static u32
-_ieee802_11_parse_elems_full(struct ieee80211_elems_parse_params *params,
- struct ieee802_11_elems *elems,
- const struct element *check_inherit)
-{
- const struct element *elem;
- bool calc_crc = params->filter != 0;
- DECLARE_BITMAP(seen_elems, 256);
- u32 crc = params->crc;
-
- bitmap_zero(seen_elems, 256);
-
- for_each_element(elem, params->start, params->len) {
- const struct element *subelem;
- bool elem_parse_failed;
- u8 id = elem->id;
- u8 elen = elem->datalen;
- const u8 *pos = elem->data;
-
- if (check_inherit &&
- !cfg80211_is_element_inherited(elem,
- check_inherit))
- continue;
-
- switch (id) {
- case WLAN_EID_SSID:
- case WLAN_EID_SUPP_RATES:
- case WLAN_EID_FH_PARAMS:
- case WLAN_EID_DS_PARAMS:
- case WLAN_EID_CF_PARAMS:
- case WLAN_EID_TIM:
- case WLAN_EID_IBSS_PARAMS:
- case WLAN_EID_CHALLENGE:
- case WLAN_EID_RSN:
- case WLAN_EID_ERP_INFO:
- case WLAN_EID_EXT_SUPP_RATES:
- case WLAN_EID_HT_CAPABILITY:
- case WLAN_EID_HT_OPERATION:
- case WLAN_EID_VHT_CAPABILITY:
- case WLAN_EID_VHT_OPERATION:
- case WLAN_EID_MESH_ID:
- case WLAN_EID_MESH_CONFIG:
- case WLAN_EID_PEER_MGMT:
- case WLAN_EID_PREQ:
- case WLAN_EID_PREP:
- case WLAN_EID_PERR:
- case WLAN_EID_RANN:
- case WLAN_EID_CHANNEL_SWITCH:
- case WLAN_EID_EXT_CHANSWITCH_ANN:
- case WLAN_EID_COUNTRY:
- case WLAN_EID_PWR_CONSTRAINT:
- case WLAN_EID_TIMEOUT_INTERVAL:
- case WLAN_EID_SECONDARY_CHANNEL_OFFSET:
- case WLAN_EID_WIDE_BW_CHANNEL_SWITCH:
- case WLAN_EID_CHAN_SWITCH_PARAM:
- case WLAN_EID_EXT_CAPABILITY:
- case WLAN_EID_CHAN_SWITCH_TIMING:
- case WLAN_EID_LINK_ID:
- case WLAN_EID_BSS_MAX_IDLE_PERIOD:
- case WLAN_EID_RSNX:
- case WLAN_EID_S1G_BCN_COMPAT:
- case WLAN_EID_S1G_CAPABILITIES:
- case WLAN_EID_S1G_OPERATION:
- case WLAN_EID_AID_RESPONSE:
- case WLAN_EID_S1G_SHORT_BCN_INTERVAL:
- /*
- * not listing WLAN_EID_CHANNEL_SWITCH_WRAPPER -- it seems possible
- * that if the content gets bigger it might be needed more than once
- */
- if (test_bit(id, seen_elems)) {
- elems->parse_error = true;
- continue;
- }
- break;
- }
-
- if (calc_crc && id < 64 && (params->filter & (1ULL << id)))
- crc = crc32_be(crc, pos - 2, elen + 2);
-
- elem_parse_failed = false;
-
- switch (id) {
- case WLAN_EID_LINK_ID:
- if (elen + 2 < sizeof(struct ieee80211_tdls_lnkie)) {
- elem_parse_failed = true;
- break;
- }
- elems->lnk_id = (void *)(pos - 2);
- break;
- case WLAN_EID_CHAN_SWITCH_TIMING:
- if (elen < sizeof(struct ieee80211_ch_switch_timing)) {
- elem_parse_failed = true;
- break;
- }
- elems->ch_sw_timing = (void *)pos;
- break;
- case WLAN_EID_EXT_CAPABILITY:
- elems->ext_capab = pos;
- elems->ext_capab_len = elen;
- break;
- case WLAN_EID_SSID:
- elems->ssid = pos;
- elems->ssid_len = elen;
- break;
- case WLAN_EID_SUPP_RATES:
- elems->supp_rates = pos;
- elems->supp_rates_len = elen;
- break;
- case WLAN_EID_DS_PARAMS:
- if (elen >= 1)
- elems->ds_params = pos;
- else
- elem_parse_failed = true;
- break;
- case WLAN_EID_TIM:
- if (elen >= sizeof(struct ieee80211_tim_ie)) {
- elems->tim = (void *)pos;
- elems->tim_len = elen;
- } else
- elem_parse_failed = true;
- break;
- case WLAN_EID_VENDOR_SPECIFIC:
- if (elen >= 4 && pos[0] == 0x00 && pos[1] == 0x50 &&
- pos[2] == 0xf2) {
- /* Microsoft OUI (00:50:F2) */
-
- if (calc_crc)
- crc = crc32_be(crc, pos - 2, elen + 2);
-
- if (elen >= 5 && pos[3] == 2) {
- /* OUI Type 2 - WMM IE */
- if (pos[4] == 0) {
- elems->wmm_info = pos;
- elems->wmm_info_len = elen;
- } else if (pos[4] == 1) {
- elems->wmm_param = pos;
- elems->wmm_param_len = elen;
- }
- }
- }
- break;
- case WLAN_EID_RSN:
- elems->rsn = pos;
- elems->rsn_len = elen;
- break;
- case WLAN_EID_ERP_INFO:
- if (elen >= 1)
- elems->erp_info = pos;
- else
- elem_parse_failed = true;
- break;
- case WLAN_EID_EXT_SUPP_RATES:
- elems->ext_supp_rates = pos;
- elems->ext_supp_rates_len = elen;
- break;
- case WLAN_EID_HT_CAPABILITY:
- if (elen >= sizeof(struct ieee80211_ht_cap))
- elems->ht_cap_elem = (void *)pos;
- else
- elem_parse_failed = true;
- break;
- case WLAN_EID_HT_OPERATION:
- if (elen >= sizeof(struct ieee80211_ht_operation))
- elems->ht_operation = (void *)pos;
- else
- elem_parse_failed = true;
- break;
- case WLAN_EID_VHT_CAPABILITY:
- if (elen >= sizeof(struct ieee80211_vht_cap))
- elems->vht_cap_elem = (void *)pos;
- else
- elem_parse_failed = true;
- break;
- case WLAN_EID_VHT_OPERATION:
- if (elen >= sizeof(struct ieee80211_vht_operation)) {
- elems->vht_operation = (void *)pos;
- if (calc_crc)
- crc = crc32_be(crc, pos - 2, elen + 2);
- break;
- }
- elem_parse_failed = true;
- break;
- case WLAN_EID_OPMODE_NOTIF:
- if (elen > 0) {
- elems->opmode_notif = pos;
- if (calc_crc)
- crc = crc32_be(crc, pos - 2, elen + 2);
- break;
- }
- elem_parse_failed = true;
- break;
- case WLAN_EID_MESH_ID:
- elems->mesh_id = pos;
- elems->mesh_id_len = elen;
- break;
- case WLAN_EID_MESH_CONFIG:
- if (elen >= sizeof(struct ieee80211_meshconf_ie))
- elems->mesh_config = (void *)pos;
- else
- elem_parse_failed = true;
- break;
- case WLAN_EID_PEER_MGMT:
- elems->peering = pos;
- elems->peering_len = elen;
- break;
- case WLAN_EID_MESH_AWAKE_WINDOW:
- if (elen >= 2)
- elems->awake_window = (void *)pos;
- break;
- case WLAN_EID_PREQ:
- elems->preq = pos;
- elems->preq_len = elen;
- break;
- case WLAN_EID_PREP:
- elems->prep = pos;
- elems->prep_len = elen;
- break;
- case WLAN_EID_PERR:
- elems->perr = pos;
- elems->perr_len = elen;
- break;
- case WLAN_EID_RANN:
- if (elen >= sizeof(struct ieee80211_rann_ie))
- elems->rann = (void *)pos;
- else
- elem_parse_failed = true;
- break;
- case WLAN_EID_CHANNEL_SWITCH:
- if (elen != sizeof(struct ieee80211_channel_sw_ie)) {
- elem_parse_failed = true;
- break;
- }
- elems->ch_switch_ie = (void *)pos;
- break;
- case WLAN_EID_EXT_CHANSWITCH_ANN:
- if (elen != sizeof(struct ieee80211_ext_chansw_ie)) {
- elem_parse_failed = true;
- break;
- }
- elems->ext_chansw_ie = (void *)pos;
- break;
- case WLAN_EID_SECONDARY_CHANNEL_OFFSET:
- if (elen != sizeof(struct ieee80211_sec_chan_offs_ie)) {
- elem_parse_failed = true;
- break;
- }
- elems->sec_chan_offs = (void *)pos;
- break;
- case WLAN_EID_CHAN_SWITCH_PARAM:
- if (elen <
- sizeof(*elems->mesh_chansw_params_ie)) {
- elem_parse_failed = true;
- break;
- }
- elems->mesh_chansw_params_ie = (void *)pos;
- break;
- case WLAN_EID_WIDE_BW_CHANNEL_SWITCH:
- if (!params->action ||
- elen < sizeof(*elems->wide_bw_chansw_ie)) {
- elem_parse_failed = true;
- break;
- }
- elems->wide_bw_chansw_ie = (void *)pos;
- break;
- case WLAN_EID_CHANNEL_SWITCH_WRAPPER:
- if (params->action) {
- elem_parse_failed = true;
- break;
- }
- /*
- * This is a bit tricky, but as we only care about
- * a few elements, parse them out manually.
- */
- subelem = cfg80211_find_elem(WLAN_EID_WIDE_BW_CHANNEL_SWITCH,
- pos, elen);
- if (subelem) {
- if (subelem->datalen >= sizeof(*elems->wide_bw_chansw_ie))
- elems->wide_bw_chansw_ie =
- (void *)subelem->data;
- else
- elem_parse_failed = true;
- }
-
- subelem = cfg80211_find_ext_elem(WLAN_EID_EXT_BANDWIDTH_INDICATION,
- pos, elen);
- if (subelem) {
- const void *edata = subelem->data + 1;
- u8 edatalen = subelem->datalen - 1;
-
- if (ieee80211_bandwidth_indication_size_ok(edata,
- edatalen))
- elems->bandwidth_indication = edata;
- else
- elem_parse_failed = true;
- }
- break;
- case WLAN_EID_COUNTRY:
- elems->country_elem = pos;
- elems->country_elem_len = elen;
- break;
- case WLAN_EID_PWR_CONSTRAINT:
- if (elen != 1) {
- elem_parse_failed = true;
- break;
- }
- elems->pwr_constr_elem = pos;
- break;
- case WLAN_EID_CISCO_VENDOR_SPECIFIC:
- /* Lots of different options exist, but we only care
- * about the Dynamic Transmit Power Control element.
- * First check for the Cisco OUI, then for the DTPC
- * tag (0x00).
- */
- if (elen < 4) {
- elem_parse_failed = true;
- break;
- }
-
- if (pos[0] != 0x00 || pos[1] != 0x40 ||
- pos[2] != 0x96 || pos[3] != 0x00)
- break;
-
- if (elen != 6) {
- elem_parse_failed = true;
- break;
- }
-
- if (calc_crc)
- crc = crc32_be(crc, pos - 2, elen + 2);
-
- elems->cisco_dtpc_elem = pos;
- break;
- case WLAN_EID_ADDBA_EXT:
- if (elen < sizeof(struct ieee80211_addba_ext_ie)) {
- elem_parse_failed = true;
- break;
- }
- elems->addba_ext_ie = (void *)pos;
- break;
- case WLAN_EID_TIMEOUT_INTERVAL:
- if (elen >= sizeof(struct ieee80211_timeout_interval_ie))
- elems->timeout_int = (void *)pos;
- else
- elem_parse_failed = true;
- break;
- case WLAN_EID_BSS_MAX_IDLE_PERIOD:
- if (elen >= sizeof(*elems->max_idle_period_ie))
- elems->max_idle_period_ie = (void *)pos;
- break;
- case WLAN_EID_RSNX:
- elems->rsnx = pos;
- elems->rsnx_len = elen;
- break;
- case WLAN_EID_TX_POWER_ENVELOPE:
- if (elen < 1 ||
- elen > sizeof(struct ieee80211_tx_pwr_env))
- break;
-
- if (elems->tx_pwr_env_num >= ARRAY_SIZE(elems->tx_pwr_env))
- break;
-
- elems->tx_pwr_env[elems->tx_pwr_env_num] = (void *)pos;
- elems->tx_pwr_env_len[elems->tx_pwr_env_num] = elen;
- elems->tx_pwr_env_num++;
- break;
- case WLAN_EID_EXTENSION:
- ieee80211_parse_extension_element(calc_crc ?
- &crc : NULL,
- elem, elems, params);
- break;
- case WLAN_EID_S1G_CAPABILITIES:
- if (elen >= sizeof(*elems->s1g_capab))
- elems->s1g_capab = (void *)pos;
- else
- elem_parse_failed = true;
- break;
- case WLAN_EID_S1G_OPERATION:
- if (elen == sizeof(*elems->s1g_oper))
- elems->s1g_oper = (void *)pos;
- else
- elem_parse_failed = true;
- break;
- case WLAN_EID_S1G_BCN_COMPAT:
- if (elen == sizeof(*elems->s1g_bcn_compat))
- elems->s1g_bcn_compat = (void *)pos;
- else
- elem_parse_failed = true;
- break;
- case WLAN_EID_AID_RESPONSE:
- if (elen == sizeof(struct ieee80211_aid_response_ie))
- elems->aid_resp = (void *)pos;
- else
- elem_parse_failed = true;
- break;
- default:
- break;
- }
-
- if (elem_parse_failed)
- elems->parse_error = true;
- else
- __set_bit(id, seen_elems);
- }
-
- if (!for_each_element_completed(elem, params->start, params->len))
- elems->parse_error = true;
-
- return crc;
-}
-
-static size_t ieee802_11_find_bssid_profile(const u8 *start, size_t len,
- struct ieee802_11_elems *elems,
- struct cfg80211_bss *bss,
- u8 *nontransmitted_profile)
-{
- const struct element *elem, *sub;
- size_t profile_len = 0;
- bool found = false;
-
- if (!bss || !bss->transmitted_bss)
- return profile_len;
-
- for_each_element_id(elem, WLAN_EID_MULTIPLE_BSSID, start, len) {
- if (elem->datalen < 2)
- continue;
- if (elem->data[0] < 1 || elem->data[0] > 8)
- continue;
-
- for_each_element(sub, elem->data + 1, elem->datalen - 1) {
- u8 new_bssid[ETH_ALEN];
- const u8 *index;
-
- if (sub->id != 0 || sub->datalen < 4) {
- /* not a valid BSS profile */
- continue;
- }
-
- if (sub->data[0] != WLAN_EID_NON_TX_BSSID_CAP ||
- sub->data[1] != 2) {
- /* The first element of the
- * Nontransmitted BSSID Profile is not
- * the Nontransmitted BSSID Capability
- * element.
- */
- continue;
- }
-
- memset(nontransmitted_profile, 0, len);
- profile_len = cfg80211_merge_profile(start, len,
- elem,
- sub,
- nontransmitted_profile,
- len);
-
- /* found a Nontransmitted BSSID Profile */
- index = cfg80211_find_ie(WLAN_EID_MULTI_BSSID_IDX,
- nontransmitted_profile,
- profile_len);
- if (!index || index[1] < 1 || index[2] == 0) {
- /* Invalid MBSSID Index element */
- continue;
- }
-
- cfg80211_gen_new_bssid(bss->transmitted_bss->bssid,
- elem->data[0],
- index[2],
- new_bssid);
- if (ether_addr_equal(new_bssid, bss->bssid)) {
- found = true;
- elems->bssid_index_len = index[1];
- elems->bssid_index = (void *)&index[2];
- break;
- }
- }
- }
-
- return found ? profile_len : 0;
-}
-
-static void ieee80211_mle_get_sta_prof(struct ieee802_11_elems *elems,
- u8 link_id)
-{
- const struct ieee80211_multi_link_elem *ml = elems->ml_basic;
- ssize_t ml_len = elems->ml_basic_len;
- const struct element *sub;
-
- if (!ml || !ml_len)
- return;
-
- if (le16_get_bits(ml->control, IEEE80211_ML_CONTROL_TYPE) !=
- IEEE80211_ML_CONTROL_TYPE_BASIC)
- return;
-
- for_each_mle_subelement(sub, (u8 *)ml, ml_len) {
- struct ieee80211_mle_per_sta_profile *prof = (void *)sub->data;
- ssize_t sta_prof_len;
- u16 control;
-
- if (sub->id != IEEE80211_MLE_SUBELEM_PER_STA_PROFILE)
- continue;
-
- if (!ieee80211_mle_basic_sta_prof_size_ok(sub->data,
- sub->datalen))
- return;
-
- control = le16_to_cpu(prof->control);
-
- if (link_id != u16_get_bits(control,
- IEEE80211_MLE_STA_CONTROL_LINK_ID))
- continue;
-
- if (!(control & IEEE80211_MLE_STA_CONTROL_COMPLETE_PROFILE))
- return;
-
- /* the sub element can be fragmented */
- sta_prof_len =
- cfg80211_defragment_element(sub,
- (u8 *)ml, ml_len,
- elems->scratch_pos,
- elems->scratch +
- elems->scratch_len -
- elems->scratch_pos,
- IEEE80211_MLE_SUBELEM_FRAGMENT);
-
- if (sta_prof_len < 0)
- return;
-
- elems->prof = (void *)elems->scratch_pos;
- elems->sta_prof_len = sta_prof_len;
- elems->scratch_pos += sta_prof_len;
-
- return;
- }
-}
-
-static void ieee80211_mle_parse_link(struct ieee802_11_elems *elems,
- struct ieee80211_elems_parse_params *params)
-{
- struct ieee80211_mle_per_sta_profile *prof;
- struct ieee80211_elems_parse_params sub = {
- .action = params->action,
- .from_ap = params->from_ap,
- .link_id = -1,
- };
- ssize_t ml_len = elems->ml_basic_len;
- const struct element *non_inherit = NULL;
- const u8 *end;
-
- if (params->link_id == -1)
- return;
-
- ml_len = cfg80211_defragment_element(elems->ml_basic_elem,
- elems->ie_start,
- elems->total_len,
- elems->scratch_pos,
- elems->scratch +
- elems->scratch_len -
- elems->scratch_pos,
- WLAN_EID_FRAGMENT);
-
- if (ml_len < 0)
- return;
-
- elems->ml_basic = (const void *)elems->scratch_pos;
- elems->ml_basic_len = ml_len;
-
- ieee80211_mle_get_sta_prof(elems, params->link_id);
- prof = elems->prof;
-
- if (!prof)
- return;
-
- /* check if we have the 4 bytes for the fixed part in assoc response */
- if (elems->sta_prof_len < sizeof(*prof) + prof->sta_info_len - 1 + 4) {
- elems->prof = NULL;
- elems->sta_prof_len = 0;
- return;
- }
-
- /*
- * Skip the capability information and the status code that are expected
- * as part of the station profile in association response frames. Note
- * the -1 is because the 'sta_info_len' is accounted to as part of the
- * per-STA profile, but not part of the 'u8 variable[]' portion.
- */
- sub.start = prof->variable + prof->sta_info_len - 1 + 4;
- end = (const u8 *)prof + elems->sta_prof_len;
- sub.len = end - sub.start;
-
- non_inherit = cfg80211_find_ext_elem(WLAN_EID_EXT_NON_INHERITANCE,
- sub.start, sub.len);
- _ieee802_11_parse_elems_full(&sub, elems, non_inherit);
-}
-
-struct ieee802_11_elems *
-ieee802_11_parse_elems_full(struct ieee80211_elems_parse_params *params)
-{
- struct ieee802_11_elems *elems;
- const struct element *non_inherit = NULL;
- u8 *nontransmitted_profile;
- int nontransmitted_profile_len = 0;
- size_t scratch_len = 3 * params->len;
-
- elems = kzalloc(struct_size(elems, scratch, scratch_len), GFP_ATOMIC);
- if (!elems)
- return NULL;
- elems->ie_start = params->start;
- elems->total_len = params->len;
- elems->scratch_len = scratch_len;
- elems->scratch_pos = elems->scratch;
-
- nontransmitted_profile = elems->scratch_pos;
- nontransmitted_profile_len =
- ieee802_11_find_bssid_profile(params->start, params->len,
- elems, params->bss,
- nontransmitted_profile);
- elems->scratch_pos += nontransmitted_profile_len;
- elems->scratch_len -= nontransmitted_profile_len;
- non_inherit = cfg80211_find_ext_elem(WLAN_EID_EXT_NON_INHERITANCE,
- nontransmitted_profile,
- nontransmitted_profile_len);
-
- elems->crc = _ieee802_11_parse_elems_full(params, elems, non_inherit);
-
- /* Override with nontransmitted profile, if found */
- if (nontransmitted_profile_len) {
- struct ieee80211_elems_parse_params sub = {
- .start = nontransmitted_profile,
- .len = nontransmitted_profile_len,
- .action = params->action,
- .link_id = params->link_id,
- };
-
- _ieee802_11_parse_elems_full(&sub, elems, NULL);
- }
-
- ieee80211_mle_parse_link(elems, params);
-
- if (elems->tim && !elems->parse_error) {
- const struct ieee80211_tim_ie *tim_ie = elems->tim;
-
- elems->dtim_period = tim_ie->dtim_period;
- elems->dtim_count = tim_ie->dtim_count;
- }
-
- /* Override DTIM period and count if needed */
- if (elems->bssid_index &&
- elems->bssid_index_len >=
- offsetofend(struct ieee80211_bssid_index, dtim_period))
- elems->dtim_period = elems->bssid_index->dtim_period;
-
- if (elems->bssid_index &&
- elems->bssid_index_len >=
- offsetofend(struct ieee80211_bssid_index, dtim_count))
- elems->dtim_count = elems->bssid_index->dtim_count;
-
- return elems;
-}
-EXPORT_SYMBOL_IF_KUNIT(ieee802_11_parse_elems_full);
-
void ieee80211_regulatory_limit_wmm_params(struct ieee80211_sub_if_data *sdata,
struct ieee80211_tx_queue_params
*qparam, int ac)
@@ -1938,37 +1173,34 @@ void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
}
}
-u8 *ieee80211_write_he_6ghz_cap(u8 *pos, __le16 cap, u8 *end)
+static int ieee80211_put_s1g_cap(struct sk_buff *skb,
+ struct ieee80211_sta_s1g_cap *s1g_cap)
{
- if ((end - pos) < 5)
- return pos;
+ if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_s1g_cap))
+ return -ENOBUFS;
- *pos++ = WLAN_EID_EXTENSION;
- *pos++ = 1 + sizeof(cap);
- *pos++ = WLAN_EID_EXT_HE_6GHZ_CAPA;
- memcpy(pos, &cap, sizeof(cap));
+ skb_put_u8(skb, WLAN_EID_S1G_CAPABILITIES);
+ skb_put_u8(skb, sizeof(struct ieee80211_s1g_cap));
- return pos + 2;
+ skb_put_data(skb, &s1g_cap->cap, sizeof(s1g_cap->cap));
+ skb_put_data(skb, &s1g_cap->nss_mcs, sizeof(s1g_cap->nss_mcs));
+
+ return 0;
}
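
[editor's note] The new skb-based helpers all follow the same pattern: check skb_tailroom() up front, fail with -ENOBUFS on shortage, then append the information element as ID / length / payload. A minimal userspace sketch of that tailroom-checked TLV append follows; the buffer type and put helpers are illustrative stand-ins for the skb API, and 217 is the S1G Capabilities element ID.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct buf { uint8_t data[64]; size_t len; };

static size_t buf_tailroom(const struct buf *b) { return sizeof(b->data) - b->len; }
static void buf_put_u8(struct buf *b, uint8_t v) { b->data[b->len++] = v; }
static void buf_put_data(struct buf *b, const void *p, size_t n)
{
	memcpy(b->data + b->len, p, n);
	b->len += n;
}

/* append one information element: ID, length, then payload */
static int put_elem(struct buf *b, uint8_t eid, const void *payload, uint8_t n)
{
	if (buf_tailroom(b) < 2u + n)
		return -ENOBUFS;	/* caller sees the shortage as an error */
	buf_put_u8(b, eid);
	buf_put_u8(b, n);
	buf_put_data(b, payload, n);
	return 0;
}

int main(void)
{
	struct buf b = { .len = 0 };
	uint8_t cap[10] = { 0 };	/* placeholder S1G capability bytes */

	printf("put: %d, used: %zu\n", put_elem(&b, 217, cap, sizeof(cap)), b.len);
	return 0;
}
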
-static int ieee80211_build_preq_ies_band(struct ieee80211_sub_if_data *sdata,
- u8 *buffer, size_t buffer_len,
- const u8 *ie, size_t ie_len,
- enum nl80211_band band,
- u32 rate_mask,
- struct cfg80211_chan_def *chandef,
- size_t *offset, u32 flags)
+static int ieee80211_put_preq_ies_band(struct sk_buff *skb,
+ struct ieee80211_sub_if_data *sdata,
+ const u8 *ie, size_t ie_len,
+ size_t *offset,
+ enum nl80211_band band,
+ u32 rate_mask,
+ struct cfg80211_chan_def *chandef,
+ u32 flags)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_supported_band *sband;
- const struct ieee80211_sta_he_cap *he_cap;
- const struct ieee80211_sta_eht_cap *eht_cap;
- u8 *pos = buffer, *end = buffer + buffer_len;
+ int i, err;
size_t noffset;
- int supp_rates_len, i;
- u8 rates[32];
- int num_rates;
- int ext_rates_len;
u32 rate_flags;
bool have_80mhz = false;
@@ -1981,32 +1213,13 @@ static int ieee80211_build_preq_ies_band(struct ieee80211_sub_if_data *sdata,
rate_flags = ieee80211_chandef_rate_flags(chandef);
/* For direct scan add S1G IE and consider its override bits */
- if (band == NL80211_BAND_S1GHZ) {
- if (end - pos < 2 + sizeof(struct ieee80211_s1g_cap))
- goto out_err;
- pos = ieee80211_ie_build_s1g_cap(pos, &sband->s1g_cap);
- goto done;
- }
+ if (band == NL80211_BAND_S1GHZ)
+ return ieee80211_put_s1g_cap(skb, &sband->s1g_cap);
- num_rates = 0;
- for (i = 0; i < sband->n_bitrates; i++) {
- if ((BIT(i) & rate_mask) == 0)
- continue; /* skip rate */
- if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
- continue;
-
- rates[num_rates++] =
- (u8) DIV_ROUND_UP(sband->bitrates[i].bitrate, 5);
- }
-
- supp_rates_len = min_t(int, num_rates, 8);
-
- if (end - pos < 2 + supp_rates_len)
- goto out_err;
- *pos++ = WLAN_EID_SUPP_RATES;
- *pos++ = supp_rates_len;
- memcpy(pos, rates, supp_rates_len);
- pos += supp_rates_len;
+ err = ieee80211_put_srates_elem(skb, sband, 0, rate_flags,
+ ~rate_mask, WLAN_EID_SUPP_RATES);
+ if (err)
+ return err;
/* insert "request information" if in custom IEs */
if (ie && ie_len) {
@@ -2019,34 +1232,28 @@ static int ieee80211_build_preq_ies_band(struct ieee80211_sub_if_data *sdata,
before_extrates,
ARRAY_SIZE(before_extrates),
*offset);
- if (end - pos < noffset - *offset)
- goto out_err;
- memcpy(pos, ie + *offset, noffset - *offset);
- pos += noffset - *offset;
+ if (skb_tailroom(skb) < noffset - *offset)
+ return -ENOBUFS;
+ skb_put_data(skb, ie + *offset, noffset - *offset);
*offset = noffset;
}
- ext_rates_len = num_rates - supp_rates_len;
- if (ext_rates_len > 0) {
- if (end - pos < 2 + ext_rates_len)
- goto out_err;
- *pos++ = WLAN_EID_EXT_SUPP_RATES;
- *pos++ = ext_rates_len;
- memcpy(pos, rates + supp_rates_len, ext_rates_len);
- pos += ext_rates_len;
- }
+ err = ieee80211_put_srates_elem(skb, sband, 0, rate_flags,
+ ~rate_mask, WLAN_EID_EXT_SUPP_RATES);
+ if (err)
+ return err;
if (chandef->chan && sband->band == NL80211_BAND_2GHZ) {
- if (end - pos < 3)
- goto out_err;
- *pos++ = WLAN_EID_DS_PARAMS;
- *pos++ = 1;
- *pos++ = ieee80211_frequency_to_channel(
- chandef->chan->center_freq);
+ if (skb_tailroom(skb) < 3)
+ return -ENOBUFS;
+ skb_put_u8(skb, WLAN_EID_DS_PARAMS);
+ skb_put_u8(skb, 1);
+ skb_put_u8(skb,
+ ieee80211_frequency_to_channel(chandef->chan->center_freq));
}
if (flags & IEEE80211_PROBE_FLAG_MIN_CONTENT)
- goto done;
+ return 0;
/* insert custom IEs that go before HT */
if (ie && ie_len) {
@@ -2061,18 +1268,21 @@ static int ieee80211_build_preq_ies_band(struct ieee80211_sub_if_data *sdata,
noffset = ieee80211_ie_split(ie, ie_len,
before_ht, ARRAY_SIZE(before_ht),
*offset);
- if (end - pos < noffset - *offset)
- goto out_err;
- memcpy(pos, ie + *offset, noffset - *offset);
- pos += noffset - *offset;
+ if (skb_tailroom(skb) < noffset - *offset)
+ return -ENOBUFS;
+ skb_put_data(skb, ie + *offset, noffset - *offset);
*offset = noffset;
}
if (sband->ht_cap.ht_supported) {
- if (end - pos < 2 + sizeof(struct ieee80211_ht_cap))
- goto out_err;
- pos = ieee80211_ie_build_ht_cap(pos, &sband->ht_cap,
- sband->ht_cap.cap);
+ u8 *pos;
+
+ if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_ht_cap))
+ return -ENOBUFS;
+
+ pos = skb_put(skb, 2 + sizeof(struct ieee80211_ht_cap));
+ ieee80211_ie_build_ht_cap(pos, &sband->ht_cap,
+ sband->ht_cap.cap);
}
/* insert custom IEs that go before VHT */
@@ -2093,10 +1303,9 @@ static int ieee80211_build_preq_ies_band(struct ieee80211_sub_if_data *sdata,
noffset = ieee80211_ie_split(ie, ie_len,
before_vht, ARRAY_SIZE(before_vht),
*offset);
- if (end - pos < noffset - *offset)
- goto out_err;
- memcpy(pos, ie + *offset, noffset - *offset);
- pos += noffset - *offset;
+ if (skb_tailroom(skb) < noffset - *offset)
+ return -ENOBUFS;
+ skb_put_data(skb, ie + *offset, noffset - *offset);
*offset = noffset;
}
@@ -2111,10 +1320,14 @@ static int ieee80211_build_preq_ies_band(struct ieee80211_sub_if_data *sdata,
}
if (sband->vht_cap.vht_supported && have_80mhz) {
- if (end - pos < 2 + sizeof(struct ieee80211_vht_cap))
- goto out_err;
- pos = ieee80211_ie_build_vht_cap(pos, &sband->vht_cap,
- sband->vht_cap.cap);
+ u8 *pos;
+
+ if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_vht_cap))
+ return -ENOBUFS;
+
+ pos = skb_put(skb, 2 + sizeof(struct ieee80211_vht_cap));
+ ieee80211_ie_build_vht_cap(pos, &sband->vht_cap,
+ sband->vht_cap.cap);
}
/* insert custom IEs that go before HE */
@@ -2131,107 +1344,128 @@ static int ieee80211_build_preq_ies_band(struct ieee80211_sub_if_data *sdata,
noffset = ieee80211_ie_split(ie, ie_len,
before_he, ARRAY_SIZE(before_he),
*offset);
- if (end - pos < noffset - *offset)
- goto out_err;
- memcpy(pos, ie + *offset, noffset - *offset);
- pos += noffset - *offset;
+ if (skb_tailroom(skb) < noffset - *offset)
+ return -ENOBUFS;
+ skb_put_data(skb, ie + *offset, noffset - *offset);
*offset = noffset;
}
- he_cap = ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif);
- if (he_cap &&
- cfg80211_any_usable_channels(local->hw.wiphy, BIT(sband->band),
+ if (cfg80211_any_usable_channels(local->hw.wiphy, BIT(sband->band),
IEEE80211_CHAN_NO_HE)) {
- pos = ieee80211_ie_build_he_cap(0, pos, he_cap, end);
- if (!pos)
- goto out_err;
+ err = ieee80211_put_he_cap(skb, sdata, sband, NULL);
+ if (err)
+ return err;
}
- eht_cap = ieee80211_get_eht_iftype_cap_vif(sband, &sdata->vif);
-
- if (eht_cap &&
- cfg80211_any_usable_channels(local->hw.wiphy, BIT(sband->band),
+ if (cfg80211_any_usable_channels(local->hw.wiphy, BIT(sband->band),
IEEE80211_CHAN_NO_HE |
IEEE80211_CHAN_NO_EHT)) {
- pos = ieee80211_ie_build_eht_cap(pos, he_cap, eht_cap, end,
- sdata->vif.type == NL80211_IFTYPE_AP);
- if (!pos)
- goto out_err;
+ err = ieee80211_put_eht_cap(skb, sdata, sband, NULL);
+ if (err)
+ return err;
}
- if (cfg80211_any_usable_channels(local->hw.wiphy,
- BIT(NL80211_BAND_6GHZ),
- IEEE80211_CHAN_NO_HE)) {
- struct ieee80211_supported_band *sband6;
-
- sband6 = local->hw.wiphy->bands[NL80211_BAND_6GHZ];
- he_cap = ieee80211_get_he_iftype_cap_vif(sband6, &sdata->vif);
-
- if (he_cap) {
- enum nl80211_iftype iftype =
- ieee80211_vif_type_p2p(&sdata->vif);
- __le16 cap = ieee80211_get_he_6ghz_capa(sband6, iftype);
-
- pos = ieee80211_write_he_6ghz_cap(pos, cap, end);
- }
- }
+ err = ieee80211_put_he_6ghz_cap(skb, sdata, IEEE80211_SMPS_OFF);
+ if (err)
+ return err;
/*
* If adding more here, adjust code in main.c
* that calculates local->scan_ies_len.
*/
- return pos - buffer;
- out_err:
- WARN_ONCE(1, "not enough space for preq IEs\n");
- done:
- return pos - buffer;
+ return 0;
}
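
[editor's note] Throughout this function, custom IEs supplied by userspace are interleaved in element-ID order: ieee80211_ie_split() returns the offset of the first IE that must come after the next capability element, the builder copies the span up to that point, and *offset carries the progress across calls. A simplified split by element ID, as a sketch under assumptions (the real helper also understands extension IDs and vendor elements):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* return the offset of the first IE whose ID is not in 'before' */
static size_t ie_split(const uint8_t *ies, size_t len,
		       const uint8_t *before, size_t n_before, size_t offset)
{
	while (offset + 2 <= len) {
		uint8_t id = ies[offset], elen = ies[offset + 1];
		size_t i;
		int ok = 0;

		for (i = 0; i < n_before; i++)
			ok |= (before[i] == id);
		if (!ok)
			break;
		offset += 2 + elen;	/* skip header + payload */
	}
	return offset;
}

int main(void)
{
	/* two IEs: ID 0 (SSID, len 3) then ID 45 (HT cap) */
	const uint8_t ies[] = { 0, 3, 'f', 'o', 'o', 45, 2, 0, 0 };
	const uint8_t before_ht[] = { 0, 1, 50 };	/* IDs allowed before HT */

	printf("split at %zu\n", ie_split(ies, sizeof(ies), before_ht, 3, 0));
	return 0;
}
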
-int ieee80211_build_preq_ies(struct ieee80211_sub_if_data *sdata, u8 *buffer,
- size_t buffer_len,
- struct ieee80211_scan_ies *ie_desc,
- const u8 *ie, size_t ie_len,
- u8 bands_used, u32 *rate_masks,
- struct cfg80211_chan_def *chandef,
- u32 flags)
+static int ieee80211_put_preq_ies(struct sk_buff *skb,
+ struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_scan_ies *ie_desc,
+ const u8 *ie, size_t ie_len,
+ u8 bands_used, u32 *rate_masks,
+ struct cfg80211_chan_def *chandef,
+ u32 flags)
{
- size_t pos = 0, old_pos = 0, custom_ie_offset = 0;
- int i;
+ size_t custom_ie_offset = 0;
+ int i, err;
memset(ie_desc, 0, sizeof(*ie_desc));
for (i = 0; i < NUM_NL80211_BANDS; i++) {
if (bands_used & BIT(i)) {
- pos += ieee80211_build_preq_ies_band(sdata,
- buffer + pos,
- buffer_len - pos,
- ie, ie_len, i,
- rate_masks[i],
- chandef,
- &custom_ie_offset,
- flags);
- ie_desc->ies[i] = buffer + old_pos;
- ie_desc->len[i] = pos - old_pos;
- old_pos = pos;
+ ie_desc->ies[i] = skb_tail_pointer(skb);
+ err = ieee80211_put_preq_ies_band(skb, sdata,
+ ie, ie_len,
+ &custom_ie_offset,
+ i, rate_masks[i],
+ chandef, flags);
+ if (err)
+ return err;
+ ie_desc->len[i] = skb_tail_pointer(skb) -
+ ie_desc->ies[i];
}
}
/* add any remaining custom IEs */
if (ie && ie_len) {
- if (WARN_ONCE(buffer_len - pos < ie_len - custom_ie_offset,
+ if (WARN_ONCE(skb_tailroom(skb) < ie_len - custom_ie_offset,
"not enough space for preq custom IEs\n"))
- return pos;
- memcpy(buffer + pos, ie + custom_ie_offset,
- ie_len - custom_ie_offset);
- ie_desc->common_ies = buffer + pos;
- ie_desc->common_ie_len = ie_len - custom_ie_offset;
- pos += ie_len - custom_ie_offset;
+ return -ENOBUFS;
+ ie_desc->common_ies = skb_tail_pointer(skb);
+ skb_put_data(skb, ie + custom_ie_offset,
+ ie_len - custom_ie_offset);
+ ie_desc->common_ie_len = skb_tail_pointer(skb) -
+ ie_desc->common_ies;
}
- return pos;
+ return 0;
};
+int ieee80211_build_preq_ies(struct ieee80211_sub_if_data *sdata, u8 *buffer,
+ size_t buffer_len,
+ struct ieee80211_scan_ies *ie_desc,
+ const u8 *ie, size_t ie_len,
+ u8 bands_used, u32 *rate_masks,
+ struct cfg80211_chan_def *chandef,
+ u32 flags)
+{
+ struct sk_buff *skb = alloc_skb(buffer_len, GFP_KERNEL);
+ uintptr_t offs;
+ int ret, i;
+ u8 *start;
+
+ if (!skb)
+ return -ENOMEM;
+
+ start = skb_tail_pointer(skb);
+ memset(start, 0, skb_tailroom(skb));
+ ret = ieee80211_put_preq_ies(skb, sdata, ie_desc, ie, ie_len,
+ bands_used, rate_masks, chandef,
+ flags);
+ if (ret < 0)
+ goto out;
+
+ if (skb->len > buffer_len) {
+ ret = -ENOBUFS;
+ goto out;
+ }
+
+ memcpy(buffer, start, skb->len);
+
+ /* adjust ie_desc for copy */
+ for (i = 0; i < NUM_NL80211_BANDS; i++) {
+ offs = ie_desc->ies[i] - start;
+ ie_desc->ies[i] = buffer + offs;
+ }
+ offs = ie_desc->common_ies - start;
+ ie_desc->common_ies = buffer + offs;
+
+ ret = skb->len;
+out:
+ consume_skb(skb);
+ return ret;
+}
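
[editor's note] The compatibility wrapper above builds into a temporary skb, copies the result into the caller's buffer, then rebases the ie_desc pointers by converting each pointer into an offset relative to the old base and re-adding it to the new one. A hedged standalone illustration of that rebase step, with invented names:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint8_t tmp[32] = "abcdefgh", out[32];
	uint8_t *band_ie = tmp + 3;	/* pointer recorded while building in tmp */

	memcpy(out, tmp, sizeof(tmp));

	/* rebase: pointer -> offset in the old buffer -> pointer in the new one */
	uintptr_t offs = band_ie - tmp;
	uint8_t *rebased = out + offs;

	printf("offset %zu, byte '%c'\n", (size_t)offs, *rebased);
	return 0;
}
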
+
struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
const u8 *src, const u8 *dst,
u32 ratemask,
@@ -2244,7 +1478,6 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
struct cfg80211_chan_def chandef;
struct sk_buff *skb;
struct ieee80211_mgmt *mgmt;
- int ies_len;
u32 rate_masks[NUM_NL80211_BANDS] = {};
struct ieee80211_scan_ies dummy_ie_desc;
@@ -2253,7 +1486,7 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
* in order to maximize the chance that we get a response. Some
* badly-behaved APs don't respond when this parameter is included.
*/
- chandef.width = sdata->vif.bss_conf.chandef.width;
+ chandef.width = sdata->vif.bss_conf.chanreq.oper.width;
if (flags & IEEE80211_PROBE_FLAG_DIRECTED)
chandef.chan = NULL;
else
@@ -2265,11 +1498,9 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
return NULL;
rate_masks[chan->band] = ratemask;
- ies_len = ieee80211_build_preq_ies(sdata, skb_tail_pointer(skb),
- skb_tailroom(skb), &dummy_ie_desc,
- ie, ie_len, BIT(chan->band),
- rate_masks, &chandef, flags);
- skb_put(skb, ies_len);
+ ieee80211_put_preq_ies(skb, sdata, &dummy_ie_desc,
+ ie, ie_len, BIT(chan->band),
+ rate_masks, &chandef, flags);
if (dst) {
mgmt = (struct ieee80211_mgmt *) skb->data;
@@ -2295,7 +1526,8 @@ u32 ieee80211_sta_get_rates(struct ieee80211_sub_if_data *sdata,
if (WARN_ON(!sband))
return 1;
- rate_flags = ieee80211_chandef_rate_flags(&sdata->vif.bss_conf.chandef);
+ rate_flags =
+ ieee80211_chandef_rate_flags(&sdata->vif.bss_conf.chanreq.oper);
num_rates = sband->n_bitrates;
supp_rates = 0;
@@ -2416,9 +1648,6 @@ static void ieee80211_assign_chanctx(struct ieee80211_local *local,
lockdep_assert_wiphy(local->hw.wiphy);
- if (!local->use_chanctx)
- return;
-
conf = rcu_dereference_protected(link->conf->chanctx_conf,
lockdep_is_held(&local->hw.wiphy->mtx));
if (conf) {
@@ -2648,20 +1877,20 @@ int ieee80211_reconfig(struct ieee80211_local *local)
}
/* add channel contexts */
- if (local->use_chanctx) {
- list_for_each_entry(ctx, &local->chanctx_list, list)
- if (ctx->replace_state !=
- IEEE80211_CHANCTX_REPLACES_OTHER)
- WARN_ON(drv_add_chanctx(local, ctx));
-
- sdata = wiphy_dereference(local->hw.wiphy,
- local->monitor_sdata);
- if (sdata && ieee80211_sdata_running(sdata))
- ieee80211_assign_chanctx(local, sdata, &sdata->deflink);
- }
+ list_for_each_entry(ctx, &local->chanctx_list, list)
+ if (ctx->replace_state != IEEE80211_CHANCTX_REPLACES_OTHER)
+ WARN_ON(drv_add_chanctx(local, ctx));
+
+ sdata = wiphy_dereference(local->hw.wiphy, local->monitor_sdata);
+ if (sdata && ieee80211_sdata_running(sdata))
+ ieee80211_assign_chanctx(local, sdata, &sdata->deflink);
/* reconfigure hardware */
- ieee80211_hw_config(local, ~0);
+ ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_LISTEN_INTERVAL |
+ IEEE80211_CONF_CHANGE_MONITOR |
+ IEEE80211_CONF_CHANGE_PS |
+ IEEE80211_CONF_CHANGE_RETRY_LIMITS |
+ IEEE80211_CONF_CHANGE_IDLE);
ieee80211_configure_filter(local);
@@ -2706,8 +1935,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
for (link_id = 0;
link_id < ARRAY_SIZE(sdata->vif.link_conf);
link_id++) {
- if (ieee80211_vif_is_mld(&sdata->vif) &&
- !(sdata->vif.active_links & BIT(link_id)))
+ if (!ieee80211_vif_link_active(&sdata->vif, link_id))
continue;
link = sdata_dereference(sdata->link[link_id], sdata);
@@ -2756,9 +1984,6 @@ int ieee80211_reconfig(struct ieee80211_local *local)
sdata->vif.bss_conf.protected_keep_alive)
changed |= BSS_CHANGED_KEEP_ALIVE;
- if (sdata->vif.bss_conf.eht_puncturing)
- changed |= BSS_CHANGED_EHT_PUNCTURING;
-
ieee80211_bss_info_change_notify(sdata,
changed);
} else if (!WARN_ON(!link)) {
@@ -3109,21 +2334,6 @@ size_t ieee80211_ie_split_vendor(const u8 *ies, size_t ielen, size_t offset)
return pos;
}
-u8 *ieee80211_ie_build_s1g_cap(u8 *pos, struct ieee80211_sta_s1g_cap *s1g_cap)
-{
- *pos++ = WLAN_EID_S1G_CAPABILITIES;
- *pos++ = sizeof(struct ieee80211_s1g_cap);
- memset(pos, 0, sizeof(struct ieee80211_s1g_cap));
-
- memcpy(pos, &s1g_cap->cap, sizeof(s1g_cap->cap));
- pos += sizeof(s1g_cap->cap);
-
- memcpy(pos, &s1g_cap->nss_mcs, sizeof(s1g_cap->nss_mcs));
- pos += sizeof(s1g_cap->nss_mcs);
-
- return pos;
-}
-
u8 *ieee80211_ie_build_ht_cap(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap,
u16 cap)
{
@@ -3180,7 +2390,8 @@ u8 *ieee80211_ie_build_vht_cap(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap,
return pos;
}
-u8 ieee80211_ie_len_he_cap(struct ieee80211_sub_if_data *sdata, u8 iftype)
+/* this may return more than ieee80211_put_he_cap() will need */
+u8 ieee80211_ie_len_he_cap(struct ieee80211_sub_if_data *sdata)
{
const struct ieee80211_sta_he_cap *he_cap;
struct ieee80211_supported_band *sband;
@@ -3190,7 +2401,7 @@ u8 ieee80211_ie_len_he_cap(struct ieee80211_sub_if_data *sdata, u8 iftype)
if (!sband)
return 0;
- he_cap = ieee80211_get_he_iftype_cap(sband, iftype);
+ he_cap = ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif);
if (!he_cap)
return 0;
@@ -3201,38 +2412,75 @@ u8 ieee80211_ie_len_he_cap(struct ieee80211_sub_if_data *sdata, u8 iftype)
he_cap->he_cap_elem.phy_cap_info);
}
-u8 *ieee80211_ie_build_he_cap(ieee80211_conn_flags_t disable_flags, u8 *pos,
+static void
+ieee80211_get_adjusted_he_cap(const struct ieee80211_conn_settings *conn,
const struct ieee80211_sta_he_cap *he_cap,
- u8 *end)
+ struct ieee80211_he_cap_elem *elem)
{
- struct ieee80211_he_cap_elem elem;
- u8 n;
- u8 ie_len;
- u8 *orig_pos = pos;
+ u8 ru_limit, max_ru;
- /* Make sure we have place for the IE */
- /*
- * TODO: the 1 added is because this temporarily is under the EXTENSION
- * IE. Get rid of it when it moves.
- */
- if (!he_cap)
- return orig_pos;
+ *elem = he_cap->he_cap_elem;
- /* modify on stack first to calculate 'n' and 'ie_len' correctly */
- elem = he_cap->he_cap_elem;
+ switch (conn->bw_limit) {
+ case IEEE80211_CONN_BW_LIMIT_20:
+ ru_limit = IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_242;
+ break;
+ case IEEE80211_CONN_BW_LIMIT_40:
+ ru_limit = IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_484;
+ break;
+ case IEEE80211_CONN_BW_LIMIT_80:
+ ru_limit = IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_996;
+ break;
+ default:
+ ru_limit = IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_2x996;
+ break;
+ }
+
+ max_ru = elem->phy_cap_info[8] & IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_MASK;
+ max_ru = min(max_ru, ru_limit);
+ elem->phy_cap_info[8] &= ~IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_MASK;
+ elem->phy_cap_info[8] |= max_ru;
- if (disable_flags & IEEE80211_CONN_DISABLE_40MHZ)
- elem.phy_cap_info[0] &=
+ if (conn->bw_limit < IEEE80211_CONN_BW_LIMIT_40) {
+ elem->phy_cap_info[0] &=
~(IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G |
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G);
+ elem->phy_cap_info[9] &=
+ ~IEEE80211_HE_PHY_CAP9_LONGER_THAN_16_SIGB_OFDM_SYM;
+ }
+
+ if (conn->bw_limit < IEEE80211_CONN_BW_LIMIT_160) {
+ elem->phy_cap_info[0] &=
+ ~(IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G |
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G);
+ elem->phy_cap_info[5] &=
+ ~IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_MASK;
+ elem->phy_cap_info[7] &=
+ ~(IEEE80211_HE_PHY_CAP7_STBC_TX_ABOVE_80MHZ |
+ IEEE80211_HE_PHY_CAP7_STBC_RX_ABOVE_80MHZ);
+ }
+}
- if (disable_flags & IEEE80211_CONN_DISABLE_160MHZ)
- elem.phy_cap_info[0] &=
- ~IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G;
+int ieee80211_put_he_cap(struct sk_buff *skb,
+ struct ieee80211_sub_if_data *sdata,
+ const struct ieee80211_supported_band *sband,
+ const struct ieee80211_conn_settings *conn)
+{
+ const struct ieee80211_sta_he_cap *he_cap;
+ struct ieee80211_he_cap_elem elem;
+ u8 *len;
+ u8 n;
+ u8 ie_len;
- if (disable_flags & IEEE80211_CONN_DISABLE_80P80MHZ)
- elem.phy_cap_info[0] &=
- ~IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G;
+ if (!conn)
+ conn = &ieee80211_conn_settings_unlimited;
+
+ he_cap = ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif);
+ if (!he_cap)
+ return 0;
+
+ /* modify on stack first to calculate 'n' and 'ie_len' correctly */
+ ieee80211_get_adjusted_he_cap(conn, he_cap, &elem);
n = ieee80211_he_mcs_nss_size(&elem);
ie_len = 2 + 1 +
@@ -3240,19 +2488,17 @@ u8 *ieee80211_ie_build_he_cap(ieee80211_conn_flags_t disable_flags, u8 *pos,
ieee80211_he_ppe_size(he_cap->ppe_thres[0],
he_cap->he_cap_elem.phy_cap_info);
- if ((end - pos) < ie_len)
- return orig_pos;
+ if (skb_tailroom(skb) < ie_len)
+ return -ENOBUFS;
- *pos++ = WLAN_EID_EXTENSION;
- pos++; /* We'll set the size later below */
- *pos++ = WLAN_EID_EXT_HE_CAPABILITY;
+ skb_put_u8(skb, WLAN_EID_EXTENSION);
+ len = skb_put(skb, 1); /* We'll set the size later below */
+ skb_put_u8(skb, WLAN_EID_EXT_HE_CAPABILITY);
/* Fixed data */
- memcpy(pos, &elem, sizeof(elem));
- pos += sizeof(elem);
+ skb_put_data(skb, &elem, sizeof(elem));
- memcpy(pos, &he_cap->he_mcs_nss_supp, n);
- pos += n;
+ skb_put_data(skb, &he_cap->he_mcs_nss_supp, n);
/* Check if PPE Threshold should be present */
if ((he_cap->he_cap_elem.phy_cap_info[6] &
@@ -3276,41 +2522,39 @@ u8 *ieee80211_ie_build_he_cap(ieee80211_conn_flags_t disable_flags, u8 *pos,
n = DIV_ROUND_UP(n, 8);
/* Copy PPE Thresholds */
- memcpy(pos, &he_cap->ppe_thres, n);
- pos += n;
+ skb_put_data(skb, &he_cap->ppe_thres, n);
end:
- orig_pos[1] = (pos - orig_pos) - 2;
- return pos;
+ *len = skb_tail_pointer(skb) - len - 1;
+ return 0;
}
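
[editor's note] Because the HE element's size depends on the variable MCS/NSS and PPE parts, the length byte is reserved first ("We'll set the size later below") and backfilled once the tail is known. A compilable sketch of that reserve-then-backfill idiom; the buffer handling is a stand-in for skb_put, while 255 and 35 are the Element ID Extension and HE Capabilities extension IDs:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint8_t buf[32];
	uint8_t *tail = buf;

	*tail++ = 255;			/* element ID (extension) */
	uint8_t *len = tail++;		/* reserve the length byte */
	*tail++ = 35;			/* extension ID */

	const uint8_t body[] = { 1, 2, 3, 4 };	/* variable-size payload */
	memcpy(tail, body, sizeof(body));
	tail += sizeof(body);

	/* backfill: everything after the length byte counts toward it */
	*len = (uint8_t)(tail - len - 1);

	printf("element length byte = %u\n", *len);	/* 1 + 4 = 5 */
	return 0;
}
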
-void ieee80211_ie_build_he_6ghz_cap(struct ieee80211_sub_if_data *sdata,
- enum ieee80211_smps_mode smps_mode,
- struct sk_buff *skb)
+int ieee80211_put_he_6ghz_cap(struct sk_buff *skb,
+ struct ieee80211_sub_if_data *sdata,
+ enum ieee80211_smps_mode smps_mode)
{
struct ieee80211_supported_band *sband;
const struct ieee80211_sband_iftype_data *iftd;
enum nl80211_iftype iftype = ieee80211_vif_type_p2p(&sdata->vif);
- u8 *pos;
- u16 cap;
+ __le16 cap;
if (!cfg80211_any_usable_channels(sdata->local->hw.wiphy,
BIT(NL80211_BAND_6GHZ),
IEEE80211_CHAN_NO_HE))
- return;
+ return 0;
sband = sdata->local->hw.wiphy->bands[NL80211_BAND_6GHZ];
iftd = ieee80211_get_sband_iftype_data(sband, iftype);
if (!iftd)
- return;
+ return 0;
/* Check for device HE 6 GHz capability before adding element */
if (!iftd->he_6ghz_capa.capa)
- return;
+ return 0;
- cap = le16_to_cpu(iftd->he_6ghz_capa.capa);
- cap &= ~IEEE80211_HE_6GHZ_CAP_SM_PS;
+ cap = iftd->he_6ghz_capa.capa;
+ cap &= cpu_to_le16(~IEEE80211_HE_6GHZ_CAP_SM_PS);
switch (smps_mode) {
case IEEE80211_SMPS_AUTOMATIC:
@@ -3318,22 +2562,27 @@ void ieee80211_ie_build_he_6ghz_cap(struct ieee80211_sub_if_data *sdata,
WARN_ON(1);
fallthrough;
case IEEE80211_SMPS_OFF:
- cap |= u16_encode_bits(WLAN_HT_CAP_SM_PS_DISABLED,
- IEEE80211_HE_6GHZ_CAP_SM_PS);
+ cap |= le16_encode_bits(WLAN_HT_CAP_SM_PS_DISABLED,
+ IEEE80211_HE_6GHZ_CAP_SM_PS);
break;
case IEEE80211_SMPS_STATIC:
- cap |= u16_encode_bits(WLAN_HT_CAP_SM_PS_STATIC,
- IEEE80211_HE_6GHZ_CAP_SM_PS);
+ cap |= le16_encode_bits(WLAN_HT_CAP_SM_PS_STATIC,
+ IEEE80211_HE_6GHZ_CAP_SM_PS);
break;
case IEEE80211_SMPS_DYNAMIC:
- cap |= u16_encode_bits(WLAN_HT_CAP_SM_PS_DYNAMIC,
- IEEE80211_HE_6GHZ_CAP_SM_PS);
+ cap |= le16_encode_bits(WLAN_HT_CAP_SM_PS_DYNAMIC,
+ IEEE80211_HE_6GHZ_CAP_SM_PS);
break;
}
- pos = skb_put(skb, 2 + 1 + sizeof(cap));
- ieee80211_write_he_6ghz_cap(pos, cpu_to_le16(cap),
- pos + 2 + 1 + sizeof(cap));
+ if (skb_tailroom(skb) < 2 + 1 + sizeof(cap))
+ return -ENOBUFS;
+
+ skb_put_u8(skb, WLAN_EID_EXTENSION);
+ skb_put_u8(skb, 1 + sizeof(cap));
+ skb_put_u8(skb, WLAN_EID_EXT_HE_6GHZ_CAPA);
+ skb_put_data(skb, &cap, sizeof(cap));
+ return 0;
}
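
[editor's note] The SM-PS rework keeps the capability field in little-endian form throughout, masking and inserting bits with the le16 helpers instead of round-tripping through CPU byte order. A standalone approximation of le16_encode_bits(), deriving the shift from the mask the way FIELD_PREP does; the 0x0600 field mask here is illustrative:

#include <stdint.h>
#include <stdio.h>

/* byte-swap helper; on a little-endian host this is the identity */
static uint16_t cpu_to_le16(uint16_t v)
{
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	return (uint16_t)((v >> 8) | (v << 8));
#else
	return v;
#endif
}

/* place 'val' into the field described by 'mask', little-endian result */
static uint16_t le16_encode_bits(uint16_t val, uint16_t mask)
{
	unsigned int shift = __builtin_ctz(mask);

	return cpu_to_le16((uint16_t)((val << shift) & mask));
}

int main(void)
{
	uint16_t cap = 0;
	const uint16_t SM_PS = 0x0600;	/* illustrative two-bit field */

	cap &= cpu_to_le16((uint16_t)~SM_PS);	/* clear the field */
	cap |= le16_encode_bits(3, SM_PS);	/* insert "disabled" = 3 */

	printf("cap = 0x%04x\n", cap);
	return 0;
}
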
u8 *ieee80211_ie_build_ht_oper(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap,
@@ -3785,7 +3034,6 @@ bool ieee80211_chandef_vht_oper(struct ieee80211_hw *hw, u32 vht_cap_info,
}
void ieee80211_chandef_eht_oper(const struct ieee80211_eht_operation_info *info,
- bool support_160, bool support_320,
struct cfg80211_chan_def *chandef)
{
chandef->center_freq1 =
@@ -3804,90 +3052,38 @@ void ieee80211_chandef_eht_oper(const struct ieee80211_eht_operation_info *info,
chandef->width = NL80211_CHAN_WIDTH_80;
break;
case IEEE80211_EHT_OPER_CHAN_WIDTH_160MHZ:
- if (support_160) {
- chandef->width = NL80211_CHAN_WIDTH_160;
- chandef->center_freq1 =
- ieee80211_channel_to_frequency(info->ccfs1,
- chandef->chan->band);
- } else {
- chandef->width = NL80211_CHAN_WIDTH_80;
- }
+ chandef->width = NL80211_CHAN_WIDTH_160;
+ chandef->center_freq1 =
+ ieee80211_channel_to_frequency(info->ccfs1,
+ chandef->chan->band);
break;
case IEEE80211_EHT_OPER_CHAN_WIDTH_320MHZ:
- if (support_320) {
- chandef->width = NL80211_CHAN_WIDTH_320;
- chandef->center_freq1 =
- ieee80211_channel_to_frequency(info->ccfs1,
- chandef->chan->band);
- } else if (support_160) {
- chandef->width = NL80211_CHAN_WIDTH_160;
- } else {
- chandef->width = NL80211_CHAN_WIDTH_80;
-
- if (chandef->center_freq1 > chandef->chan->center_freq)
- chandef->center_freq1 -= 40;
- else
- chandef->center_freq1 += 40;
- }
+ chandef->width = NL80211_CHAN_WIDTH_320;
+ chandef->center_freq1 =
+ ieee80211_channel_to_frequency(info->ccfs1,
+ chandef->chan->band);
break;
}
}
-bool ieee80211_chandef_he_6ghz_oper(struct ieee80211_sub_if_data *sdata,
+bool ieee80211_chandef_he_6ghz_oper(struct ieee80211_local *local,
const struct ieee80211_he_operation *he_oper,
const struct ieee80211_eht_operation *eht_oper,
struct cfg80211_chan_def *chandef)
{
- struct ieee80211_local *local = sdata->local;
- struct ieee80211_supported_band *sband;
- enum nl80211_iftype iftype = ieee80211_vif_type_p2p(&sdata->vif);
- const struct ieee80211_sta_he_cap *he_cap;
- const struct ieee80211_sta_eht_cap *eht_cap;
struct cfg80211_chan_def he_chandef = *chandef;
const struct ieee80211_he_6ghz_oper *he_6ghz_oper;
- struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf;
- bool support_80_80, support_160, support_320;
- u8 he_phy_cap, eht_phy_cap;
u32 freq;
if (chandef->chan->band != NL80211_BAND_6GHZ)
return true;
- sband = local->hw.wiphy->bands[NL80211_BAND_6GHZ];
-
- he_cap = ieee80211_get_he_iftype_cap(sband, iftype);
- if (!he_cap) {
- sdata_info(sdata, "Missing iftype sband data/HE cap");
- return false;
- }
-
- he_phy_cap = he_cap->he_cap_elem.phy_cap_info[0];
- support_160 =
- he_phy_cap &
- IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G;
- support_80_80 =
- he_phy_cap &
- IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G;
-
- if (!he_oper) {
- sdata_info(sdata,
- "HE is not advertised on (on %d MHz), expect issues\n",
- chandef->chan->center_freq);
+ if (!he_oper)
return false;
- }
-
- eht_cap = ieee80211_get_eht_iftype_cap(sband, iftype);
- if (!eht_cap)
- eht_oper = NULL;
he_6ghz_oper = ieee80211_he_6ghz_oper(he_oper);
-
- if (!he_6ghz_oper) {
- sdata_info(sdata,
- "HE 6GHz operation missing (on %d MHz), expect issues\n",
- chandef->chan->center_freq);
+ if (!he_6ghz_oper)
return false;
- }
/*
* The EHT operation IE does not contain the primary channel so the
@@ -3896,20 +3092,10 @@ bool ieee80211_chandef_he_6ghz_oper(struct ieee80211_sub_if_data *sdata,
*/
freq = ieee80211_channel_to_frequency(he_6ghz_oper->primary,
NL80211_BAND_6GHZ);
- he_chandef.chan = ieee80211_get_channel(sdata->local->hw.wiphy, freq);
+ he_chandef.chan = ieee80211_get_channel(local->hw.wiphy, freq);
- switch (u8_get_bits(he_6ghz_oper->control,
- IEEE80211_HE_6GHZ_OPER_CTRL_REG_INFO)) {
- case IEEE80211_6GHZ_CTRL_REG_LPI_AP:
- bss_conf->power_type = IEEE80211_REG_LPI_AP;
- break;
- case IEEE80211_6GHZ_CTRL_REG_SP_AP:
- bss_conf->power_type = IEEE80211_REG_SP_AP;
- break;
- default:
- bss_conf->power_type = IEEE80211_REG_UNSET_AP;
- break;
- }
+ if (!he_chandef.chan)
+ return false;
if (!eht_oper ||
!(eht_oper->params & IEEE80211_EHT_OPER_INFO_PRESENT)) {
@@ -3928,13 +3114,10 @@ bool ieee80211_chandef_he_6ghz_oper(struct ieee80211_sub_if_data *sdata,
he_chandef.width = NL80211_CHAN_WIDTH_80;
if (!he_6ghz_oper->ccfs1)
break;
- if (abs(he_6ghz_oper->ccfs1 - he_6ghz_oper->ccfs0) == 8) {
- if (support_160)
- he_chandef.width = NL80211_CHAN_WIDTH_160;
- } else {
- if (support_80_80)
- he_chandef.width = NL80211_CHAN_WIDTH_80P80;
- }
+ if (abs(he_6ghz_oper->ccfs1 - he_6ghz_oper->ccfs0) == 8)
+ he_chandef.width = NL80211_CHAN_WIDTH_160;
+ else
+ he_chandef.width = NL80211_CHAN_WIDTH_80P80;
break;
}
@@ -3946,30 +3129,17 @@ bool ieee80211_chandef_he_6ghz_oper(struct ieee80211_sub_if_data *sdata,
he_chandef.center_freq1 =
ieee80211_channel_to_frequency(he_6ghz_oper->ccfs0,
NL80211_BAND_6GHZ);
- if (support_80_80 || support_160)
- he_chandef.center_freq2 =
- ieee80211_channel_to_frequency(he_6ghz_oper->ccfs1,
- NL80211_BAND_6GHZ);
+ he_chandef.center_freq2 =
+ ieee80211_channel_to_frequency(he_6ghz_oper->ccfs1,
+ NL80211_BAND_6GHZ);
}
} else {
- eht_phy_cap = eht_cap->eht_cap_elem.phy_cap_info[0];
- support_320 =
- eht_phy_cap & IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ;
-
ieee80211_chandef_eht_oper((const void *)eht_oper->optional,
- support_160, support_320,
&he_chandef);
}
- if (!cfg80211_chandef_valid(&he_chandef)) {
- sdata_info(sdata,
- "HE 6GHz operation resulted in invalid chandef: %d MHz/%d/%d MHz/%d MHz\n",
- he_chandef.chan ? he_chandef.chan->center_freq : 0,
- he_chandef.width,
- he_chandef.center_freq1,
- he_chandef.center_freq2);
+ if (!cfg80211_chandef_valid(&he_chandef))
return false;
- }
*chandef = he_chandef;
@@ -4012,121 +3182,62 @@ bool ieee80211_chandef_s1g_oper(const struct ieee80211_s1g_oper_ie *oper,
return true;
}
-int ieee80211_parse_bitrates(enum nl80211_chan_width width,
- const struct ieee80211_supported_band *sband,
- const u8 *srates, int srates_len, u32 *rates)
-{
- u32 rate_flags = ieee80211_chanwidth_rate_flags(width);
- struct ieee80211_rate *br;
- int brate, rate, i, j, count = 0;
-
- *rates = 0;
-
- for (i = 0; i < srates_len; i++) {
- rate = srates[i] & 0x7f;
-
- for (j = 0; j < sband->n_bitrates; j++) {
- br = &sband->bitrates[j];
- if ((rate_flags & br->flags) != rate_flags)
- continue;
-
- brate = DIV_ROUND_UP(br->bitrate, 5);
- if (brate == rate) {
- *rates |= BIT(j);
- count++;
- break;
- }
- }
- }
- return count;
-}
-
-int ieee80211_add_srates_ie(struct ieee80211_sub_if_data *sdata,
- struct sk_buff *skb, bool need_basic,
- enum nl80211_band band)
+int ieee80211_put_srates_elem(struct sk_buff *skb,
+ const struct ieee80211_supported_band *sband,
+ u32 basic_rates, u32 rate_flags, u32 masked_rates,
+ u8 element_id)
{
- struct ieee80211_local *local = sdata->local;
- struct ieee80211_supported_band *sband;
- int rate;
- u8 i, rates, *pos;
- u32 basic_rates = sdata->vif.bss_conf.basic_rates;
- u32 rate_flags;
+ u8 i, rates, skip;
- rate_flags = ieee80211_chandef_rate_flags(&sdata->vif.bss_conf.chandef);
- sband = local->hw.wiphy->bands[band];
rates = 0;
for (i = 0; i < sband->n_bitrates; i++) {
if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
continue;
+ if (masked_rates & BIT(i))
+ continue;
rates++;
}
- if (rates > 8)
- rates = 8;
-
- if (skb_tailroom(skb) < rates + 2)
- return -ENOMEM;
-
- pos = skb_put(skb, rates + 2);
- *pos++ = WLAN_EID_SUPP_RATES;
- *pos++ = rates;
- for (i = 0; i < rates; i++) {
- u8 basic = 0;
- if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
- continue;
- if (need_basic && basic_rates & BIT(i))
- basic = 0x80;
- rate = DIV_ROUND_UP(sband->bitrates[i].bitrate, 5);
- *pos++ = basic | (u8) rate;
+ if (element_id == WLAN_EID_SUPP_RATES) {
+ rates = min_t(u8, rates, 8);
+ skip = 0;
+ } else {
+ skip = 8;
+ if (rates <= skip)
+ return 0;
+ rates -= skip;
}
- return 0;
-}
+ if (skb_tailroom(skb) < rates + 2)
+ return -ENOBUFS;
-int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata,
- struct sk_buff *skb, bool need_basic,
- enum nl80211_band band)
-{
- struct ieee80211_local *local = sdata->local;
- struct ieee80211_supported_band *sband;
- int rate;
- u8 i, exrates, *pos;
- u32 basic_rates = sdata->vif.bss_conf.basic_rates;
- u32 rate_flags;
+ skb_put_u8(skb, element_id);
+ skb_put_u8(skb, rates);
- rate_flags = ieee80211_chandef_rate_flags(&sdata->vif.bss_conf.chandef);
+ for (i = 0; i < sband->n_bitrates && rates; i++) {
+ int rate;
+ u8 basic;
- sband = local->hw.wiphy->bands[band];
- exrates = 0;
- for (i = 0; i < sband->n_bitrates; i++) {
if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
continue;
- exrates++;
- }
+ if (masked_rates & BIT(i))
+ continue;
- if (exrates > 8)
- exrates -= 8;
- else
- exrates = 0;
+ if (skip > 0) {
+ skip--;
+ continue;
+ }
- if (skb_tailroom(skb) < exrates + 2)
- return -ENOMEM;
+ basic = basic_rates & BIT(i) ? 0x80 : 0;
- if (exrates) {
- pos = skb_put(skb, exrates + 2);
- *pos++ = WLAN_EID_EXT_SUPP_RATES;
- *pos++ = exrates;
- for (i = 8; i < sband->n_bitrates; i++) {
- u8 basic = 0;
- if ((rate_flags & sband->bitrates[i].flags)
- != rate_flags)
- continue;
- if (need_basic && basic_rates & BIT(i))
- basic = 0x80;
- rate = DIV_ROUND_UP(sband->bitrates[i].bitrate, 5);
- *pos++ = basic | (u8) rate;
- }
+ rate = DIV_ROUND_UP(sband->bitrates[i].bitrate, 5);
+ skb_put_u8(skb, basic | (u8)rate);
+ rates--;
}
+
+ WARN(rates > 0, "rates confused: rates:%d, element:%d\n",
+ rates, element_id);
+
return 0;
}
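
[editor's note] The unified helper emits both rate elements from one routine: rates go on air in units of 500 kb/s (hence DIV_ROUND_UP(bitrate, 5), with bitrate stored in 100 kb/s units), basic rates get bit 7 set, the first eight land in Supported Rates and any remainder in Extended Supported Rates. A small self-contained sketch of the encoding with a typical 2.4 GHz rate set:

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* bitrates in 100 kb/s units, as mac80211 stores them */
	const int bitrates[] = { 10, 20, 55, 110, 60, 90, 120, 180, 240, 360, 480, 540 };
	const uint32_t basic_rates = 0x0f;	/* first four are basic */
	int i, n = sizeof(bitrates) / sizeof(bitrates[0]);

	for (i = 0; i < n; i++) {
		uint8_t rate = (uint8_t)DIV_ROUND_UP(bitrates[i], 5); /* 500 kb/s units */
		uint8_t basic = (basic_rates & (1u << i)) ? 0x80 : 0;
		const char *elem = i < 8 ? "SUPP_RATES" : "EXT_SUPP_RATES";

		printf("%-14s 0x%02x\n", elem, basic | rate);
	}
	return 0;
}
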
@@ -4338,7 +3449,7 @@ void ieee80211_dfs_cac_cancel(struct ieee80211_local *local)
&sdata->deflink.dfs_cac_timer_work);
if (sdata->wdev.cac_started) {
- chandef = sdata->vif.bss_conf.chandef;
+ chandef = sdata->vif.bss_conf.chanreq.oper;
ieee80211_link_release_channel(&sdata->deflink);
cfg80211_cac_event(sdata->dev,
&chandef,
@@ -4386,78 +3497,92 @@ void ieee80211_radar_detected(struct ieee80211_hw *hw)
}
EXPORT_SYMBOL(ieee80211_radar_detected);
-ieee80211_conn_flags_t ieee80211_chandef_downgrade(struct cfg80211_chan_def *c)
+void ieee80211_chandef_downgrade(struct cfg80211_chan_def *c,
+ struct ieee80211_conn_settings *conn)
{
- ieee80211_conn_flags_t ret;
- int tmp;
+ enum nl80211_chan_width new_primary_width;
+ struct ieee80211_conn_settings _ignored = {};
+
+ /* allow passing NULL if caller doesn't care */
+ if (!conn)
+ conn = &_ignored;
+
+again:
+ /* no-HT indicates nothing to do */
+ new_primary_width = NL80211_CHAN_WIDTH_20_NOHT;
switch (c->width) {
+ default:
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ WARN_ON_ONCE(1);
+ fallthrough;
case NL80211_CHAN_WIDTH_20:
c->width = NL80211_CHAN_WIDTH_20_NOHT;
- ret = IEEE80211_CONN_DISABLE_HT | IEEE80211_CONN_DISABLE_VHT;
+ conn->mode = IEEE80211_CONN_MODE_LEGACY;
+ conn->bw_limit = IEEE80211_CONN_BW_LIMIT_20;
+ c->punctured = 0;
break;
case NL80211_CHAN_WIDTH_40:
c->width = NL80211_CHAN_WIDTH_20;
c->center_freq1 = c->chan->center_freq;
- ret = IEEE80211_CONN_DISABLE_40MHZ |
- IEEE80211_CONN_DISABLE_VHT;
+ if (conn->mode == IEEE80211_CONN_MODE_VHT)
+ conn->mode = IEEE80211_CONN_MODE_HT;
+ conn->bw_limit = IEEE80211_CONN_BW_LIMIT_20;
+ c->punctured = 0;
break;
case NL80211_CHAN_WIDTH_80:
- tmp = (30 + c->chan->center_freq - c->center_freq1)/20;
- /* n_P40 */
- tmp /= 2;
- /* freq_P40 */
- c->center_freq1 = c->center_freq1 - 20 + 40 * tmp;
- c->width = NL80211_CHAN_WIDTH_40;
- ret = IEEE80211_CONN_DISABLE_VHT;
+ new_primary_width = NL80211_CHAN_WIDTH_40;
+ if (conn->mode == IEEE80211_CONN_MODE_VHT)
+ conn->mode = IEEE80211_CONN_MODE_HT;
+ conn->bw_limit = IEEE80211_CONN_BW_LIMIT_40;
break;
case NL80211_CHAN_WIDTH_80P80:
c->center_freq2 = 0;
c->width = NL80211_CHAN_WIDTH_80;
- ret = IEEE80211_CONN_DISABLE_80P80MHZ |
- IEEE80211_CONN_DISABLE_160MHZ;
+ conn->bw_limit = IEEE80211_CONN_BW_LIMIT_80;
break;
case NL80211_CHAN_WIDTH_160:
- /* n_P20 */
- tmp = (70 + c->chan->center_freq - c->center_freq1)/20;
- /* n_P80 */
- tmp /= 4;
- c->center_freq1 = c->center_freq1 - 40 + 80 * tmp;
- c->width = NL80211_CHAN_WIDTH_80;
- ret = IEEE80211_CONN_DISABLE_80P80MHZ |
- IEEE80211_CONN_DISABLE_160MHZ;
+ new_primary_width = NL80211_CHAN_WIDTH_80;
+ conn->bw_limit = IEEE80211_CONN_BW_LIMIT_80;
break;
case NL80211_CHAN_WIDTH_320:
- /* n_P20 */
- tmp = (150 + c->chan->center_freq - c->center_freq1) / 20;
- /* n_P160 */
- tmp /= 8;
- c->center_freq1 = c->center_freq1 - 80 + 160 * tmp;
- c->width = NL80211_CHAN_WIDTH_160;
- ret = IEEE80211_CONN_DISABLE_320MHZ;
- break;
- default:
- case NL80211_CHAN_WIDTH_20_NOHT:
- WARN_ON_ONCE(1);
- c->width = NL80211_CHAN_WIDTH_20_NOHT;
- ret = IEEE80211_CONN_DISABLE_HT | IEEE80211_CONN_DISABLE_VHT;
+ new_primary_width = NL80211_CHAN_WIDTH_160;
+ conn->bw_limit = IEEE80211_CONN_BW_LIMIT_160;
break;
case NL80211_CHAN_WIDTH_1:
case NL80211_CHAN_WIDTH_2:
case NL80211_CHAN_WIDTH_4:
case NL80211_CHAN_WIDTH_8:
case NL80211_CHAN_WIDTH_16:
+ WARN_ON_ONCE(1);
+ /* keep c->width */
+ conn->mode = IEEE80211_CONN_MODE_S1G;
+ conn->bw_limit = IEEE80211_CONN_BW_LIMIT_20;
+ break;
case NL80211_CHAN_WIDTH_5:
case NL80211_CHAN_WIDTH_10:
WARN_ON_ONCE(1);
/* keep c->width */
- ret = IEEE80211_CONN_DISABLE_HT | IEEE80211_CONN_DISABLE_VHT;
+ conn->mode = IEEE80211_CONN_MODE_LEGACY;
+ conn->bw_limit = IEEE80211_CONN_BW_LIMIT_20;
break;
}
- WARN_ON_ONCE(!cfg80211_chandef_valid(c));
+ if (new_primary_width != NL80211_CHAN_WIDTH_20_NOHT) {
+ c->center_freq1 = cfg80211_chandef_primary(c, new_primary_width,
+ &c->punctured);
+ c->width = new_primary_width;
+ }
- return ret;
+ /*
+ * With an 80 MHz channel, we might have the puncturing in the primary
+ * 40 MHz channel, but that's not valid when downgraded to 40 MHz width.
+ * In that case, downgrade again.
+ */
+ if (!cfg80211_chandef_valid(c) && c->punctured)
+ goto again;
+
+ WARN_ON_ONCE(!cfg80211_chandef_valid(c));
}
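
[editor's note] The removed open-coded arithmetic (now centralized in cfg80211_chandef_primary()) located the primary sub-segment by counting 20 MHz sub-channels from the low edge of the wide segment up to the control channel, grouping them, and recomputing the segment center. A worked standalone version for the 80-to-40 MHz case, using channel 36 in an 80 MHz segment centered on channel 42:

#include <stdio.h>

int main(void)
{
	int control_freq = 5180;	/* primary 20 MHz channel (ch 36) */
	int center_freq1 = 5210;	/* 80 MHz segment center (ch 42) */

	/* index of the primary 20 MHz channel within the 80 MHz segment */
	int n_p20 = (30 + control_freq - center_freq1) / 20;
	/* index of the primary 40 MHz pair */
	int n_p40 = n_p20 / 2;
	/* center of that 40 MHz pair */
	int new_cf1 = center_freq1 - 20 + 40 * n_p40;

	printf("40 MHz primary center: %d MHz\n", new_cf1);	/* 5190 */
	return 0;
}
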
/*
@@ -4773,7 +3898,7 @@ static u8 ieee80211_chanctx_radar_detect(struct ieee80211_local *local,
list_for_each_entry(link, &ctx->reserved_links, reserved_chanctx_list)
if (link->reserved_radar_required)
- radar_detect |= BIT(link->reserved_chandef.width);
+ radar_detect |= BIT(link->reserved.oper.width);
/*
* An in-place reservation context should not have any assigned vifs
@@ -4787,7 +3912,7 @@ static u8 ieee80211_chanctx_radar_detect(struct ieee80211_local *local,
continue;
radar_detect |=
- BIT(link->conf->chandef.width);
+ BIT(link->conf->chanreq.oper.width);
}
return radar_detect;
@@ -5037,7 +4162,8 @@ u16 ieee80211_encode_usf(int listen_interval)
return (u16) listen_interval;
}
-u8 ieee80211_ie_len_eht_cap(struct ieee80211_sub_if_data *sdata, u8 iftype)
+/* this may return more than ieee80211_put_eht_cap() will need */
+u8 ieee80211_ie_len_eht_cap(struct ieee80211_sub_if_data *sdata)
{
const struct ieee80211_sta_he_cap *he_cap;
const struct ieee80211_sta_eht_cap *eht_cap;
@@ -5049,13 +4175,12 @@ u8 ieee80211_ie_len_eht_cap(struct ieee80211_sub_if_data *sdata, u8 iftype)
if (!sband)
return 0;
- he_cap = ieee80211_get_he_iftype_cap(sband, iftype);
- eht_cap = ieee80211_get_eht_iftype_cap(sband, iftype);
+ he_cap = ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif);
+ eht_cap = ieee80211_get_eht_iftype_cap_vif(sband, &sdata->vif);
if (!he_cap || !eht_cap)
return 0;
- is_ap = iftype == NL80211_IFTYPE_AP ||
- iftype == NL80211_IFTYPE_P2P_GO;
+ is_ap = sdata->vif.type == NL80211_IFTYPE_AP;
n = ieee80211_eht_mcs_nss_size(&he_cap->he_cap_elem,
&eht_cap->eht_cap_elem,
@@ -5067,45 +4192,134 @@ u8 ieee80211_ie_len_eht_cap(struct ieee80211_sub_if_data *sdata, u8 iftype)
return 0;
}
-u8 *ieee80211_ie_build_eht_cap(u8 *pos,
- const struct ieee80211_sta_he_cap *he_cap,
- const struct ieee80211_sta_eht_cap *eht_cap,
- u8 *end,
- bool for_ap)
+int ieee80211_put_eht_cap(struct sk_buff *skb,
+ struct ieee80211_sub_if_data *sdata,
+ const struct ieee80211_supported_band *sband,
+ const struct ieee80211_conn_settings *conn)
{
+ const struct ieee80211_sta_he_cap *he_cap =
+ ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif);
+ const struct ieee80211_sta_eht_cap *eht_cap =
+ ieee80211_get_eht_iftype_cap_vif(sband, &sdata->vif);
+ bool for_ap = sdata->vif.type == NL80211_IFTYPE_AP;
+ struct ieee80211_eht_cap_elem_fixed fixed;
+ struct ieee80211_he_cap_elem he;
u8 mcs_nss_len, ppet_len;
+ u8 orig_mcs_nss_len;
u8 ie_len;
- u8 *orig_pos = pos;
+
+ if (!conn)
+ conn = &ieee80211_conn_settings_unlimited;
/* Make sure we have place for the IE */
if (!he_cap || !eht_cap)
- return orig_pos;
+ return 0;
+
+ orig_mcs_nss_len = ieee80211_eht_mcs_nss_size(&he_cap->he_cap_elem,
+ &eht_cap->eht_cap_elem,
+ for_ap);
- mcs_nss_len = ieee80211_eht_mcs_nss_size(&he_cap->he_cap_elem,
- &eht_cap->eht_cap_elem,
- for_ap);
+ ieee80211_get_adjusted_he_cap(conn, he_cap, &he);
+
+ fixed = eht_cap->eht_cap_elem;
+
+ if (conn->bw_limit < IEEE80211_CONN_BW_LIMIT_80)
+ fixed.phy_cap_info[6] &=
+ ~IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_80MHZ;
+
+ if (conn->bw_limit < IEEE80211_CONN_BW_LIMIT_160) {
+ fixed.phy_cap_info[1] &=
+ ~IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_160MHZ_MASK;
+ fixed.phy_cap_info[2] &=
+ ~IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_160MHZ_MASK;
+ fixed.phy_cap_info[6] &=
+ ~IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_160MHZ;
+ }
+
+ if (conn->bw_limit < IEEE80211_CONN_BW_LIMIT_320) {
+ fixed.phy_cap_info[0] &=
+ ~IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ;
+ fixed.phy_cap_info[1] &=
+ ~IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_320MHZ_MASK;
+ fixed.phy_cap_info[2] &=
+ ~IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_320MHZ_MASK;
+ fixed.phy_cap_info[6] &=
+ ~IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_320MHZ;
+ }
+
+ if (conn->bw_limit == IEEE80211_CONN_BW_LIMIT_20)
+ fixed.phy_cap_info[0] &=
+ ~IEEE80211_EHT_PHY_CAP0_242_TONE_RU_GT20MHZ;
+
+ mcs_nss_len = ieee80211_eht_mcs_nss_size(&he, &fixed, for_ap);
ppet_len = ieee80211_eht_ppe_size(eht_cap->eht_ppe_thres[0],
- eht_cap->eht_cap_elem.phy_cap_info);
+ fixed.phy_cap_info);
ie_len = 2 + 1 + sizeof(eht_cap->eht_cap_elem) + mcs_nss_len + ppet_len;
- if ((end - pos) < ie_len)
- return orig_pos;
+ if (skb_tailroom(skb) < ie_len)
+ return -ENOBUFS;
- *pos++ = WLAN_EID_EXTENSION;
- *pos++ = ie_len - 2;
- *pos++ = WLAN_EID_EXT_EHT_CAPABILITY;
+ skb_put_u8(skb, WLAN_EID_EXTENSION);
+ skb_put_u8(skb, ie_len - 2);
+ skb_put_u8(skb, WLAN_EID_EXT_EHT_CAPABILITY);
+ skb_put_data(skb, &fixed, sizeof(fixed));
- /* Fixed data */
- memcpy(pos, &eht_cap->eht_cap_elem, sizeof(eht_cap->eht_cap_elem));
- pos += sizeof(eht_cap->eht_cap_elem);
+ if (mcs_nss_len == 4 && orig_mcs_nss_len != 4) {
+ /*
+ * If the (non-AP) STA became 20 MHz only, then convert from
+ * <=80 to 20-MHz-only format, where MCSes are indicated in
+ * the groups 0-7, 8-9, 10-11, 12-13 rather than just 0-9,
+ * 10-11, 12-13. Thus, use 0-9 for 0-7 and 8-9.
+ */
+ skb_put_u8(skb, eht_cap->eht_mcs_nss_supp.bw._80.rx_tx_mcs9_max_nss);
+ skb_put_u8(skb, eht_cap->eht_mcs_nss_supp.bw._80.rx_tx_mcs9_max_nss);
+ skb_put_u8(skb, eht_cap->eht_mcs_nss_supp.bw._80.rx_tx_mcs11_max_nss);
+ skb_put_u8(skb, eht_cap->eht_mcs_nss_supp.bw._80.rx_tx_mcs13_max_nss);
+ } else {
+ skb_put_data(skb, &eht_cap->eht_mcs_nss_supp, mcs_nss_len);
+ }
- memcpy(pos, &eht_cap->eht_mcs_nss_supp, mcs_nss_len);
- pos += mcs_nss_len;
+ if (ppet_len)
+ skb_put_data(skb, &eht_cap->eht_ppe_thres, ppet_len);
- if (ppet_len) {
- memcpy(pos, &eht_cap->eht_ppe_thres, ppet_len);
- pos += ppet_len;
- }
+ return 0;
+}
- return pos;
+const char *ieee80211_conn_mode_str(enum ieee80211_conn_mode mode)
+{
+ static const char * const modes[] = {
+ [IEEE80211_CONN_MODE_S1G] = "S1G",
+ [IEEE80211_CONN_MODE_LEGACY] = "legacy",
+ [IEEE80211_CONN_MODE_HT] = "HT",
+ [IEEE80211_CONN_MODE_VHT] = "VHT",
+ [IEEE80211_CONN_MODE_HE] = "HE",
+ [IEEE80211_CONN_MODE_EHT] = "EHT",
+ };
+
+ if (WARN_ON(mode >= ARRAY_SIZE(modes)))
+ return "<out of range>";
+
+ return modes[mode] ?: "<missing string>";
+}
+
+enum ieee80211_conn_bw_limit
+ieee80211_min_bw_limit_from_chandef(struct cfg80211_chan_def *chandef)
+{
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ case NL80211_CHAN_WIDTH_20:
+ return IEEE80211_CONN_BW_LIMIT_20;
+ case NL80211_CHAN_WIDTH_40:
+ return IEEE80211_CONN_BW_LIMIT_40;
+ case NL80211_CHAN_WIDTH_80:
+ return IEEE80211_CONN_BW_LIMIT_80;
+ case NL80211_CHAN_WIDTH_80P80:
+ case NL80211_CHAN_WIDTH_160:
+ return IEEE80211_CONN_BW_LIMIT_160;
+ case NL80211_CHAN_WIDTH_320:
+ return IEEE80211_CONN_BW_LIMIT_320;
+ default:
+ WARN(1, "unhandled chandef width %d\n", chandef->width);
+ return IEEE80211_CONN_BW_LIMIT_20;
+ }
}
diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c
index b3a5c3e96a72..642891cafbaf 100644
--- a/net/mac80211/vht.c
+++ b/net/mac80211/vht.c
@@ -4,7 +4,7 @@
*
* Portions of this file
* Copyright(c) 2015 - 2016 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2023 Intel Corporation
+ * Copyright (C) 2018 - 2024 Intel Corporation
*/
#include <linux/ieee80211.h>
@@ -369,7 +369,7 @@ ieee80211_sta_cap_rx_bw(struct link_sta_info *link_sta)
link_conf = rcu_dereference(sdata->vif.link_conf[link_id]);
if (eht_cap->has_eht &&
- link_conf->chandef.chan->band == NL80211_BAND_6GHZ) {
+ link_conf->chanreq.oper.chan->band == NL80211_BAND_6GHZ) {
info = eht_cap->eht_cap_elem.phy_cap_info[0];
if (info & IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ) {
@@ -380,7 +380,7 @@ ieee80211_sta_cap_rx_bw(struct link_sta_info *link_sta)
info = he_cap->he_cap_elem.phy_cap_info[0];
- if (link_conf->chandef.chan->band == NL80211_BAND_2GHZ) {
+ if (link_conf->chanreq.oper.chan->band == NL80211_BAND_2GHZ) {
if (info & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G)
ret = IEEE80211_STA_RX_BW_40;
else
@@ -515,7 +515,7 @@ ieee80211_sta_cur_vht_bw(struct link_sta_info *link_sta)
if (WARN_ON(!link_conf))
bss_width = NL80211_CHAN_WIDTH_20_NOHT;
else
- bss_width = link_conf->chandef.width;
+ bss_width = link_conf->chanreq.oper.width;
rcu_read_unlock();
bw = ieee80211_sta_cap_rx_bw(link_sta);
@@ -541,15 +541,11 @@ ieee80211_sta_cur_vht_bw(struct link_sta_info *link_sta)
return bw;
}
-void ieee80211_sta_set_rx_nss(struct link_sta_info *link_sta)
+void ieee80211_sta_init_nss(struct link_sta_info *link_sta)
{
u8 ht_rx_nss = 0, vht_rx_nss = 0, he_rx_nss = 0, eht_rx_nss = 0, rx_nss;
bool support_160;
- /* if we received a notification already don't overwrite it */
- if (link_sta->pub->rx_nss)
- return;
-
if (link_sta->pub->eht_cap.has_eht) {
int i;
const u8 *rx_nss_mcs = (void *)&link_sta->pub->eht_cap.eht_mcs_nss_supp;
@@ -627,7 +623,15 @@ void ieee80211_sta_set_rx_nss(struct link_sta_info *link_sta)
rx_nss = max(vht_rx_nss, ht_rx_nss);
rx_nss = max(he_rx_nss, rx_nss);
rx_nss = max(eht_rx_nss, rx_nss);
- link_sta->pub->rx_nss = max_t(u8, 1, rx_nss);
+ rx_nss = max_t(u8, 1, rx_nss);
+ link_sta->capa_nss = rx_nss;
+
+ /* that shouldn't be set yet, but we can handle it anyway */
+ if (link_sta->op_mode_nss)
+ link_sta->pub->rx_nss =
+ min_t(u8, rx_nss, link_sta->op_mode_nss);
+ else
+ link_sta->pub->rx_nss = rx_nss;
}
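
[editor's note] The rework separates what the peer is capable of (capa_nss) from what it currently advertises via operating-mode notification (op_mode_nss); the effective rx_nss is the notification value clamped to capability. A one-function sketch of the init-path clamping (the notification-handling path below rejects, rather than clamps, an out-of-range value):

#include <stdio.h>

static unsigned int effective_rx_nss(unsigned int capa_nss, unsigned int op_mode_nss)
{
	/* no notification yet: use full capability; otherwise clamp to it */
	if (!op_mode_nss)
		return capa_nss;
	return op_mode_nss < capa_nss ? op_mode_nss : capa_nss;
}

int main(void)
{
	printf("%u %u %u\n",
	       effective_rx_nss(4, 0),	/* 4: capability only */
	       effective_rx_nss(4, 2),	/* 2: valid downgrade */
	       effective_rx_nss(2, 4));	/* 2: clamped to capability */
	return 0;
}
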
u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
@@ -637,7 +641,7 @@ u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
enum ieee80211_sta_rx_bandwidth new_bw;
struct sta_opmode_info sta_opmode = {};
u32 changed = 0;
- u8 nss, cur_nss;
+ u8 nss;
/* ignore - no support for BF yet */
if (opmode & IEEE80211_OPMODE_NOTIF_RX_NSS_TYPE_BF)
@@ -647,23 +651,17 @@ u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
nss >>= IEEE80211_OPMODE_NOTIF_RX_NSS_SHIFT;
nss += 1;
- if (link_sta->pub->rx_nss != nss) {
- cur_nss = link_sta->pub->rx_nss;
- /* Reset rx_nss and call ieee80211_sta_set_rx_nss() which
- * will set the same to max nss value calculated based on capability.
- */
- link_sta->pub->rx_nss = 0;
- ieee80211_sta_set_rx_nss(link_sta);
- /* Do not allow an nss change to rx_nss greater than max_nss
- * negotiated and capped to APs capability during association.
- */
- if (nss <= link_sta->pub->rx_nss) {
- link_sta->pub->rx_nss = nss;
- sta_opmode.rx_nss = nss;
- changed |= IEEE80211_RC_NSS_CHANGED;
- sta_opmode.changed |= STA_OPMODE_N_SS_CHANGED;
+ if (link_sta->op_mode_nss != nss) {
+ if (nss <= link_sta->capa_nss) {
+ link_sta->op_mode_nss = nss;
+
+ if (nss != link_sta->pub->rx_nss) {
+ link_sta->pub->rx_nss = nss;
+ changed |= IEEE80211_RC_NSS_CHANGED;
+ sta_opmode.rx_nss = link_sta->pub->rx_nss;
+ sta_opmode.changed |= STA_OPMODE_N_SS_CHANGED;
+ }
} else {
- link_sta->pub->rx_nss = cur_nss;
pr_warn_ratelimited("Ignoring NSS change in VHT Operating Mode Notification from %pM with invalid nss %d",
link_sta->pub->addr, nss);
}
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index 94dae7cb6dbd..e40529b8c5c9 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -315,7 +315,7 @@ ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx)
* Calculate AAD for CCMP/GCMP, returning qos_tid since we
* need that in CCMP also for b_0.
*/
-static u8 ccmp_gcmp_aad(struct sk_buff *skb, u8 *aad)
+static u8 ccmp_gcmp_aad(struct sk_buff *skb, u8 *aad, bool spp_amsdu)
{
struct ieee80211_hdr *hdr = (void *)skb->data;
__le16 mask_fc;
@@ -340,7 +340,14 @@ static u8 ccmp_gcmp_aad(struct sk_buff *skb, u8 *aad)
len_a += 6;
if (ieee80211_is_data_qos(hdr->frame_control)) {
- qos_tid = ieee80211_get_tid(hdr);
+ qos_tid = *ieee80211_get_qos_ctl(hdr);
+
+ if (spp_amsdu)
+ qos_tid &= IEEE80211_QOS_CTL_TID_MASK |
+ IEEE80211_QOS_CTL_A_MSDU_PRESENT;
+ else
+ qos_tid &= IEEE80211_QOS_CTL_TID_MASK;
+
mask_fc &= ~cpu_to_le16(IEEE80211_FCTL_ORDER);
len_a += 2;
} else {
@@ -369,10 +376,11 @@ static u8 ccmp_gcmp_aad(struct sk_buff *skb, u8 *aad)
return qos_tid;
}
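
[editor's note] The SPP A-MSDU change alters how the QoS Control field is masked into the AAD: with signaling-and-payload protection negotiated, the A-MSDU-present bit is kept alongside the TID bits; otherwise only the TID survives. A standalone sketch of the mask computation, using the IEEE80211_QOS_CTL_TID_MASK (0x000f) and A_MSDU_PRESENT (0x0080) values:

#include <stdint.h>
#include <stdio.h>

#define QOS_CTL_TID_MASK	0x000f
#define QOS_CTL_A_MSDU_PRESENT	0x0080

/* mask the QoS Control field for the AAD, per SPP A-MSDU negotiation */
static uint16_t aad_qos_bits(uint16_t qos_ctl, int spp_amsdu)
{
	uint16_t mask = QOS_CTL_TID_MASK;

	if (spp_amsdu)
		mask |= QOS_CTL_A_MSDU_PRESENT;	/* A-MSDU bit is protected */
	return qos_ctl & mask;
}

int main(void)
{
	uint16_t qos_ctl = 0x0085;	/* TID 5, A-MSDU present */

	printf("legacy: 0x%04x, spp: 0x%04x\n",
	       aad_qos_bits(qos_ctl, 0), aad_qos_bits(qos_ctl, 1));
	return 0;
}
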
-static void ccmp_special_blocks(struct sk_buff *skb, u8 *pn, u8 *b_0, u8 *aad)
+static void ccmp_special_blocks(struct sk_buff *skb, u8 *pn, u8 *b_0, u8 *aad,
+ bool spp_amsdu)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- u8 qos_tid = ccmp_gcmp_aad(skb, aad);
+ u8 qos_tid = ccmp_gcmp_aad(skb, aad, spp_amsdu);
/* In CCM, the initial vectors (IV) used for CTR mode encryption and CBC
* mode authentication are not allowed to collide, yet both are derived
@@ -479,7 +487,8 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb,
return 0;
pos += IEEE80211_CCMP_HDR_LEN;
- ccmp_special_blocks(skb, pn, b_0, aad);
+ ccmp_special_blocks(skb, pn, b_0, aad,
+ key->conf.flags & IEEE80211_KEY_FLAG_SPP_AMSDU);
return ieee80211_aes_ccm_encrypt(key->u.ccmp.tfm, b_0, aad, pos, len,
skb_put(skb, mic_len));
}
@@ -557,7 +566,8 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx,
u8 aad[2 * AES_BLOCK_SIZE];
u8 b_0[AES_BLOCK_SIZE];
/* hardware didn't decrypt/verify MIC */
- ccmp_special_blocks(skb, pn, b_0, aad);
+ ccmp_special_blocks(skb, pn, b_0, aad,
+ key->conf.flags & IEEE80211_KEY_FLAG_SPP_AMSDU);
if (ieee80211_aes_ccm_decrypt(
key->u.ccmp.tfm, b_0, aad,
@@ -581,7 +591,8 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx,
return RX_CONTINUE;
}
-static void gcmp_special_blocks(struct sk_buff *skb, u8 *pn, u8 *j_0, u8 *aad)
+static void gcmp_special_blocks(struct sk_buff *skb, u8 *pn, u8 *j_0, u8 *aad,
+ bool spp_amsdu)
{
struct ieee80211_hdr *hdr = (void *)skb->data;
@@ -591,7 +602,7 @@ static void gcmp_special_blocks(struct sk_buff *skb, u8 *pn, u8 *j_0, u8 *aad)
j_0[14] = 0;
j_0[AES_BLOCK_SIZE - 1] = 0x01;
- ccmp_gcmp_aad(skb, aad);
+ ccmp_gcmp_aad(skb, aad, spp_amsdu);
}
static inline void gcmp_pn2hdr(u8 *hdr, const u8 *pn, int key_id)
@@ -680,7 +691,8 @@ static int gcmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
return 0;
pos += IEEE80211_GCMP_HDR_LEN;
- gcmp_special_blocks(skb, pn, j_0, aad);
+ gcmp_special_blocks(skb, pn, j_0, aad,
+ key->conf.flags & IEEE80211_KEY_FLAG_SPP_AMSDU);
return ieee80211_aes_gcm_encrypt(key->u.gcmp.tfm, j_0, aad, pos, len,
skb_put(skb, IEEE80211_GCMP_MIC_LEN));
}
@@ -753,7 +765,8 @@ ieee80211_crypto_gcmp_decrypt(struct ieee80211_rx_data *rx)
u8 aad[2 * AES_BLOCK_SIZE];
u8 j_0[AES_BLOCK_SIZE];
/* hardware didn't decrypt/verify MIC */
- gcmp_special_blocks(skb, pn, j_0, aad);
+ gcmp_special_blocks(skb, pn, j_0, aad,
+ key->conf.flags & IEEE80211_KEY_FLAG_SPP_AMSDU);
if (ieee80211_aes_gcm_decrypt(
key->u.gcmp.tfm, j_0, aad,
diff --git a/net/mac802154/llsec.c b/net/mac802154/llsec.c
index 8d2eabc71bbe..f13b07ebfb98 100644
--- a/net/mac802154/llsec.c
+++ b/net/mac802154/llsec.c
@@ -265,19 +265,27 @@ fail:
return -ENOMEM;
}
+static void mac802154_llsec_key_del_rcu(struct rcu_head *rcu)
+{
+ struct ieee802154_llsec_key_entry *pos;
+ struct mac802154_llsec_key *mkey;
+
+ pos = container_of(rcu, struct ieee802154_llsec_key_entry, rcu);
+ mkey = container_of(pos->key, struct mac802154_llsec_key, key);
+
+ llsec_key_put(mkey);
+ kfree_sensitive(pos);
+}
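
[editor's note] The fix defers freeing the key entry to an RCU callback so lockless readers traversing the key list never touch freed memory; the callback recovers the containing entry from the embedded rcu_head with container_of(). A standalone illustration of that pointer recovery (struct names invented, rcu_head reduced to a stand-in):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct rcu_head { void *next; };	/* stand-in for the kernel type */

struct key_entry {
	int id;
	struct rcu_head rcu;	/* embedded callback handle */
};

/* an RCU callback receives only a pointer to the embedded member */
static void del_cb(struct rcu_head *rcu)
{
	struct key_entry *e = container_of(rcu, struct key_entry, rcu);

	printf("freeing key %d\n", e->id);
}

int main(void)
{
	struct key_entry e = { .id = 7 };

	del_cb(&e.rcu);
	return 0;
}
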
+
int mac802154_llsec_key_del(struct mac802154_llsec *sec,
const struct ieee802154_llsec_key_id *key)
{
struct ieee802154_llsec_key_entry *pos;
list_for_each_entry(pos, &sec->table.keys, list) {
- struct mac802154_llsec_key *mkey;
-
- mkey = container_of(pos->key, struct mac802154_llsec_key, key);
-
if (llsec_key_id_equal(&pos->id, key)) {
list_del_rcu(&pos->list);
- llsec_key_put(mkey);
+ call_rcu(&pos->rcu, mac802154_llsec_key_del_rcu);
return 0;
}
}
diff --git a/net/mctp/Kconfig b/net/mctp/Kconfig
index 3a5c0e70da77..d8d3413a37f7 100644
--- a/net/mctp/Kconfig
+++ b/net/mctp/Kconfig
@@ -14,6 +14,7 @@ menuconfig MCTP
config MCTP_TEST
bool "MCTP core tests" if !KUNIT_ALL_TESTS
+ select MCTP_FLOWS
depends on MCTP=y && KUNIT=y
default KUNIT_ALL_TESTS
diff --git a/net/mctp/af_mctp.c b/net/mctp/af_mctp.c
index f6be58b68c6f..de52a9191da0 100644
--- a/net/mctp/af_mctp.c
+++ b/net/mctp/af_mctp.c
@@ -350,30 +350,102 @@ static int mctp_getsockopt(struct socket *sock, int level, int optname,
return -EINVAL;
}
-static int mctp_ioctl_alloctag(struct mctp_sock *msk, unsigned long arg)
+/* helpers for reading/writing the tag ioctl structs, handling compatibility
+ * across the two versions, and some basic API error checking
+ */
+static int mctp_ioctl_tag_copy_from_user(unsigned long arg,
+ struct mctp_ioc_tag_ctl2 *ctl,
+ bool tagv2)
+{
+ struct mctp_ioc_tag_ctl ctl_compat;
+ unsigned long size;
+ void *ptr;
+ int rc;
+
+ if (tagv2) {
+ size = sizeof(*ctl);
+ ptr = ctl;
+ } else {
+ size = sizeof(ctl_compat);
+ ptr = &ctl_compat;
+ }
+
+ rc = copy_from_user(ptr, (void __user *)arg, size);
+ if (rc)
+ return -EFAULT;
+
+ if (!tagv2) {
+ /* compat, using defaults for new fields */
+ ctl->net = MCTP_INITIAL_DEFAULT_NET;
+ ctl->peer_addr = ctl_compat.peer_addr;
+ ctl->local_addr = MCTP_ADDR_ANY;
+ ctl->flags = ctl_compat.flags;
+ ctl->tag = ctl_compat.tag;
+ }
+
+ if (ctl->flags)
+ return -EINVAL;
+
+ if (ctl->local_addr != MCTP_ADDR_ANY &&
+ ctl->local_addr != MCTP_ADDR_NULL)
+ return -EINVAL;
+
+ return 0;
+}
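
[editor's note] The compat helper reads whichever struct version userspace passed and widens the v1 form into the v2 layout, filling the new fields with defaults. A self-contained sketch of that widening step; the struct layouts, default constants, and memcpy-for-copy_from_user are assumptions of the sketch, not the uapi definitions:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct tag_ctl_v1 { uint8_t peer; uint8_t tag; uint16_t flags; };
struct tag_ctl_v2 { uint32_t net; uint8_t peer; uint8_t local; uint8_t tag; uint16_t flags; };

#define DEFAULT_NET	1
#define ADDR_ANY	0xff

/* widen either version of the user struct into the v2 form */
static void read_ctl(const void *user, int v2, struct tag_ctl_v2 *out)
{
	if (v2) {
		memcpy(out, user, sizeof(*out));
		return;
	}

	struct tag_ctl_v1 v1;
	memcpy(&v1, user, sizeof(v1));

	out->net = DEFAULT_NET;		/* new fields get defaults */
	out->peer = v1.peer;
	out->local = ADDR_ANY;
	out->tag = v1.tag;
	out->flags = v1.flags;
}

int main(void)
{
	struct tag_ctl_v1 v1 = { .peer = 9, .tag = 2, .flags = 0 };
	struct tag_ctl_v2 ctl;

	read_ctl(&v1, 0, &ctl);
	printf("net %u peer %u tag %u\n",
	       (unsigned)ctl.net, (unsigned)ctl.peer, (unsigned)ctl.tag);
	return 0;
}
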
+
+static int mctp_ioctl_tag_copy_to_user(unsigned long arg,
+ struct mctp_ioc_tag_ctl2 *ctl,
+ bool tagv2)
+{
+ struct mctp_ioc_tag_ctl ctl_compat;
+ unsigned long size;
+ void *ptr;
+ int rc;
+
+ if (tagv2) {
+ ptr = ctl;
+ size = sizeof(*ctl);
+ } else {
+ ctl_compat.peer_addr = ctl->peer_addr;
+ ctl_compat.tag = ctl->tag;
+ ctl_compat.flags = ctl->flags;
+
+ ptr = &ctl_compat;
+ size = sizeof(ctl_compat);
+ }
+
+ rc = copy_to_user((void __user *)arg, ptr, size);
+ if (rc)
+ return -EFAULT;
+
+ return 0;
+}
+
+static int mctp_ioctl_alloctag(struct mctp_sock *msk, bool tagv2,
+ unsigned long arg)
{
struct net *net = sock_net(&msk->sk);
struct mctp_sk_key *key = NULL;
- struct mctp_ioc_tag_ctl ctl;
+ struct mctp_ioc_tag_ctl2 ctl;
unsigned long flags;
u8 tag;
+ int rc;
- if (copy_from_user(&ctl, (void __user *)arg, sizeof(ctl)))
- return -EFAULT;
+ rc = mctp_ioctl_tag_copy_from_user(arg, &ctl, tagv2);
+ if (rc)
+ return rc;
if (ctl.tag)
return -EINVAL;
- if (ctl.flags)
- return -EINVAL;
-
- key = mctp_alloc_local_tag(msk, ctl.peer_addr, MCTP_ADDR_ANY,
- true, &tag);
+ key = mctp_alloc_local_tag(msk, ctl.net, MCTP_ADDR_ANY,
+ ctl.peer_addr, true, &tag);
if (IS_ERR(key))
return PTR_ERR(key);
ctl.tag = tag | MCTP_TAG_OWNER | MCTP_TAG_PREALLOC;
- if (copy_to_user((void __user *)arg, &ctl, sizeof(ctl))) {
+ rc = mctp_ioctl_tag_copy_to_user(arg, &ctl, tagv2);
+ if (rc) {
unsigned long fl2;
/* Unwind our key allocation: the keys list lock needs to be
* taken before the individual key locks, and we need a valid
@@ -385,28 +457,27 @@ static int mctp_ioctl_alloctag(struct mctp_sock *msk, unsigned long arg)
__mctp_key_remove(key, net, fl2, MCTP_TRACE_KEY_DROPPED);
mctp_key_unref(key);
spin_unlock_irqrestore(&net->mctp.keys_lock, flags);
- return -EFAULT;
+ return rc;
}
mctp_key_unref(key);
return 0;
}
-static int mctp_ioctl_droptag(struct mctp_sock *msk, unsigned long arg)
+static int mctp_ioctl_droptag(struct mctp_sock *msk, bool tagv2,
+ unsigned long arg)
{
struct net *net = sock_net(&msk->sk);
- struct mctp_ioc_tag_ctl ctl;
+ struct mctp_ioc_tag_ctl2 ctl;
unsigned long flags, fl2;
struct mctp_sk_key *key;
struct hlist_node *tmp;
int rc;
u8 tag;
- if (copy_from_user(&ctl, (void __user *)arg, sizeof(ctl)))
- return -EFAULT;
-
- if (ctl.flags)
- return -EINVAL;
+ rc = mctp_ioctl_tag_copy_from_user(arg, &ctl, tagv2);
+ if (rc)
+ return rc;
/* Must be a local tag, TO set, preallocated */
if ((ctl.tag & ~MCTP_TAG_MASK) != (MCTP_TAG_OWNER | MCTP_TAG_PREALLOC))
@@ -422,6 +493,7 @@ static int mctp_ioctl_droptag(struct mctp_sock *msk, unsigned long arg)
*/
spin_lock_irqsave(&key->lock, fl2);
if (key->manual_alloc &&
+ ctl.net == key->net &&
ctl.peer_addr == key->peer_addr &&
tag == key->tag) {
__mctp_key_remove(key, net, fl2,
@@ -439,12 +511,17 @@ static int mctp_ioctl_droptag(struct mctp_sock *msk, unsigned long arg)
static int mctp_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
struct mctp_sock *msk = container_of(sock->sk, struct mctp_sock, sk);
+ bool tagv2 = false;
switch (cmd) {
+ case SIOCMCTPALLOCTAG2:
case SIOCMCTPALLOCTAG:
- return mctp_ioctl_alloctag(msk, arg);
+ tagv2 = cmd == SIOCMCTPALLOCTAG2;
+ return mctp_ioctl_alloctag(msk, tagv2, arg);
case SIOCMCTPDROPTAG:
- return mctp_ioctl_droptag(msk, arg);
+ case SIOCMCTPDROPTAG2:
+ tagv2 = cmd == SIOCMCTPDROPTAG2;
+ return mctp_ioctl_droptag(msk, tagv2, arg);
}
return -EINVAL;
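[editor note] From userspace, the v2 ioctls carry the extended control structure whose extra net and local_addr fields the copy helpers above handle. A hedged usage sketch, assuming the uapi struct exposes exactly the fields those helpers populate (net, peer_addr, local_addr, flags, tag):

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/mctp.h>

/* allocate a preallocated tag for a peer on a specific MCTP net */
static int alloc_tag2(int sd, unsigned int net, mctp_eid_t peer, __u8 *tag)
{
	struct mctp_ioc_tag_ctl2 ctl = {
		.net = net,
		.peer_addr = peer,
		.local_addr = MCTP_ADDR_ANY,	/* no specific local EID */
		.flags = 0,			/* must be zero */
	};

	if (ioctl(sd, SIOCMCTPALLOCTAG2, &ctl) < 0) {
		perror("SIOCMCTPALLOCTAG2");
		return -1;
	}

	*tag = ctl.tag;	/* includes MCTP_TAG_OWNER | MCTP_TAG_PREALLOC */
	return 0;
}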
diff --git a/net/mctp/route.c b/net/mctp/route.c
index ceee44ea09d9..eefd7834d9a0 100644
--- a/net/mctp/route.c
+++ b/net/mctp/route.c
@@ -73,13 +73,50 @@ static struct mctp_sock *mctp_lookup_bind(struct net *net, struct sk_buff *skb)
return NULL;
}
-static bool mctp_key_match(struct mctp_sk_key *key, mctp_eid_t local,
- mctp_eid_t peer, u8 tag)
+/* A note on key allocations.
+ *
+ * struct net->mctp.keys contains our set of currently-allocated keys for
+ * MCTP tag management. The lookup tuple for these is the peer EID,
+ * local EID and MCTP tag.
+ *
+ * In some cases, the peer EID may be MCTP_ADDR_ANY: for example, when a
+ * broadcast message is sent, we may receive responses from any peer EID.
+ * Because the broadcast dest address is equivalent to ANY, we create
+ * a key with (local = local-eid, peer = ANY). This allows a match on the
+ * incoming broadcast responses from any peer.
+ *
+ * We perform lookups when packets are received, and when tags are allocated
+ * in two scenarios:
+ *
+ * - when a packet is sent, with a locally-owned tag: we need to find an
+ * unused tag value for the (local, peer) EID pair.
+ *
+ * - when a tag is manually allocated: we need to find an unused tag value
+ * for the peer EID, but don't have a specific local EID at that stage.
+ *
+ * In the latter case, on successful allocation, we end up with a tag with
+ * (local = ANY, peer = peer-eid).
+ *
+ * So, the key set allows both a local EID of ANY, as well as a peer EID of
+ * ANY in the lookup tuple. Both may be ANY if we prealloc for a broadcast.
+ * The matching (in mctp_key_match()) during lookup allows the match value to
+ * be ANY in either the dest or source addresses.
+ *
+ * When allocating (+ inserting) a tag, we need to check for conflicts amongst
+ * the existing tag set. This requires matching either exactly on the local
+ * and peer addresses, or either address being ANY.
+ */
+
+static bool mctp_key_match(struct mctp_sk_key *key, unsigned int net,
+ mctp_eid_t local, mctp_eid_t peer, u8 tag)
{
+ if (key->net != net)
+ return false;
+
if (!mctp_address_matches(key->local_addr, local))
return false;
- if (key->peer_addr != peer)
+ if (!mctp_address_matches(key->peer_addr, peer))
return false;
if (key->tag != tag)
@@ -92,7 +129,7 @@ static bool mctp_key_match(struct mctp_sk_key *key, mctp_eid_t local,
* key exists.
*/
static struct mctp_sk_key *mctp_lookup_key(struct net *net, struct sk_buff *skb,
- mctp_eid_t peer,
+ unsigned int netid, mctp_eid_t peer,
unsigned long *irqflags)
__acquires(&key->lock)
{
@@ -108,7 +145,7 @@ static struct mctp_sk_key *mctp_lookup_key(struct net *net, struct sk_buff *skb,
spin_lock_irqsave(&net->mctp.keys_lock, flags);
hlist_for_each_entry(key, &net->mctp.keys, hlist) {
- if (!mctp_key_match(key, mh->dest, peer, tag))
+ if (!mctp_key_match(key, netid, mh->dest, peer, tag))
continue;
spin_lock(&key->lock);
@@ -131,6 +168,7 @@ static struct mctp_sk_key *mctp_lookup_key(struct net *net, struct sk_buff *skb,
}
static struct mctp_sk_key *mctp_key_alloc(struct mctp_sock *msk,
+ unsigned int net,
mctp_eid_t local, mctp_eid_t peer,
u8 tag, gfp_t gfp)
{
@@ -140,6 +178,7 @@ static struct mctp_sk_key *mctp_key_alloc(struct mctp_sock *msk,
if (!key)
return NULL;
+ key->net = net;
key->peer_addr = peer;
key->local_addr = local;
key->tag = tag;
@@ -185,8 +224,8 @@ static int mctp_key_add(struct mctp_sk_key *key, struct mctp_sock *msk)
}
hlist_for_each_entry(tmp, &net->mctp.keys, hlist) {
- if (mctp_key_match(tmp, key->local_addr, key->peer_addr,
- key->tag)) {
+ if (mctp_key_match(tmp, key->net, key->local_addr,
+ key->peer_addr, key->tag)) {
spin_lock(&tmp->lock);
if (tmp->valid)
rc = -EEXIST;
@@ -327,6 +366,7 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
struct net *net = dev_net(skb->dev);
struct mctp_sock *msk;
struct mctp_hdr *mh;
+ unsigned int netid;
unsigned long f;
u8 tag, flags;
int rc;
@@ -345,6 +385,7 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
/* grab header, advance data ptr */
mh = mctp_hdr(skb);
+ netid = mctp_cb(skb)->net;
skb_pull(skb, sizeof(struct mctp_hdr));
if (mh->ver != 1)
@@ -358,7 +399,7 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
/* lookup socket / reasm context, exactly matching (src,dest,tag).
* we hold a ref on the key, and key->lock held.
*/
- key = mctp_lookup_key(net, skb, mh->src, &f);
+ key = mctp_lookup_key(net, skb, netid, mh->src, &f);
if (flags & MCTP_HDR_FLAG_SOM) {
if (key) {
@@ -368,8 +409,12 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
* key lookup to find the socket, but don't use this
* key for reassembly - we'll create a more specific
* one for future packets if required (ie, !EOM).
+ *
+ * this lookup requires key->peer to be MCTP_ADDR_ANY;
+ * it doesn't match just any key->peer.
*/
- any_key = mctp_lookup_key(net, skb, MCTP_ADDR_ANY, &f);
+ any_key = mctp_lookup_key(net, skb, netid,
+ MCTP_ADDR_ANY, &f);
if (any_key) {
msk = container_of(any_key->sk,
struct mctp_sock, sk);
@@ -406,7 +451,7 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
* packets for this message
*/
if (!key) {
- key = mctp_key_alloc(msk, mh->dest, mh->src,
+ key = mctp_key_alloc(msk, netid, mh->dest, mh->src,
tag, GFP_ATOMIC);
if (!key) {
rc = -ENOMEM;
@@ -596,11 +641,12 @@ static void mctp_reserve_tag(struct net *net, struct mctp_sk_key *key,
refcount_inc(&key->refs);
}
-/* Allocate a locally-owned tag value for (saddr, daddr), and reserve
+/* Allocate a locally-owned tag value for (local, peer), and reserve
* it for the socket msk
*/
struct mctp_sk_key *mctp_alloc_local_tag(struct mctp_sock *msk,
- mctp_eid_t daddr, mctp_eid_t saddr,
+ unsigned int netid,
+ mctp_eid_t local, mctp_eid_t peer,
bool manual, u8 *tagp)
{
struct net *net = sock_net(&msk->sk);
@@ -610,11 +656,11 @@ struct mctp_sk_key *mctp_alloc_local_tag(struct mctp_sock *msk,
u8 tagbits;
/* for NULL destination EIDs, we may get a response from any peer */
- if (daddr == MCTP_ADDR_NULL)
- daddr = MCTP_ADDR_ANY;
+ if (peer == MCTP_ADDR_NULL)
+ peer = MCTP_ADDR_ANY;
/* be optimistic, alloc now */
- key = mctp_key_alloc(msk, saddr, daddr, 0, GFP_KERNEL);
+ key = mctp_key_alloc(msk, netid, local, peer, 0, GFP_KERNEL);
if (!key)
return ERR_PTR(-ENOMEM);
@@ -631,12 +677,24 @@ struct mctp_sk_key *mctp_alloc_local_tag(struct mctp_sock *msk,
* lock held, they don't change over the lifetime of the key.
*/
+ /* tags are net-specific */
+ if (tmp->net != netid)
+ continue;
+
/* if we don't own the tag, it can't conflict */
if (tmp->tag & MCTP_HDR_FLAG_TO)
continue;
- if (!(mctp_address_matches(tmp->peer_addr, daddr) &&
- mctp_address_matches(tmp->local_addr, saddr)))
+ /* Since we're avoiding conflicting entries, match peer and
+ * local addresses, including with a wildcard on ANY. See
+ * 'A note on key allocations' for background.
+ */
+ if (peer != MCTP_ADDR_ANY &&
+ !mctp_address_matches(tmp->peer_addr, peer))
+ continue;
+
+ if (local != MCTP_ADDR_ANY &&
+ !mctp_address_matches(tmp->local_addr, local))
continue;
spin_lock(&tmp->lock);
@@ -671,6 +729,7 @@ struct mctp_sk_key *mctp_alloc_local_tag(struct mctp_sock *msk,
}
static struct mctp_sk_key *mctp_lookup_prealloc_tag(struct mctp_sock *msk,
+ unsigned int netid,
mctp_eid_t daddr,
u8 req_tag, u8 *tagp)
{
@@ -685,6 +744,9 @@ static struct mctp_sk_key *mctp_lookup_prealloc_tag(struct mctp_sock *msk,
spin_lock_irqsave(&mns->keys_lock, flags);
hlist_for_each_entry(tmp, &mns->keys, hlist) {
+ if (tmp->net != netid)
+ continue;
+
if (tmp->tag != req_tag)
continue;
@@ -843,6 +905,9 @@ static int mctp_do_fragment_route(struct mctp_route *rt, struct sk_buff *skb,
/* copy message payload */
skb_copy_bits(skb, pos, skb_transport_header(skb2), size);
+ /* we need to copy the extensions for MCTP flow data */
+ skb_ext_copy(skb2, skb);
+
/* do route */
rc = rt->output(rt, skb2);
if (rc)
@@ -865,6 +930,7 @@ int mctp_local_output(struct sock *sk, struct mctp_route *rt,
struct mctp_sk_key *key;
struct mctp_hdr *hdr;
unsigned long flags;
+ unsigned int netid;
unsigned int mtu;
mctp_eid_t saddr;
bool ext_rt;
@@ -916,16 +982,17 @@ int mctp_local_output(struct sock *sk, struct mctp_route *rt,
rc = 0;
}
spin_unlock_irqrestore(&rt->dev->addrs_lock, flags);
+ netid = READ_ONCE(rt->dev->net);
if (rc)
goto out_release;
if (req_tag & MCTP_TAG_OWNER) {
if (req_tag & MCTP_TAG_PREALLOC)
- key = mctp_lookup_prealloc_tag(msk, daddr,
+ key = mctp_lookup_prealloc_tag(msk, netid, daddr,
req_tag, &tag);
else
- key = mctp_alloc_local_tag(msk, daddr, saddr,
+ key = mctp_alloc_local_tag(msk, netid, saddr, daddr,
false, &tag);
if (IS_ERR(key)) {
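[editor note] The wildcard semantics described in the 'A note on key allocations' comment reduce to an exact net and tag compare plus a per-address test that treats MCTP_ADDR_ANY on the key side as match-anything. A restatement for clarity, not the in-tree helpers:

/* a key-side address matches when equal, or when it holds the wildcard */
static bool addr_matches(mctp_eid_t key_addr, mctp_eid_t addr)
{
	return key_addr == MCTP_ADDR_ANY || key_addr == addr;
}

static bool key_matches(const struct mctp_sk_key *key, unsigned int net,
			mctp_eid_t local, mctp_eid_t peer, u8 tag)
{
	return key->net == net && key->tag == tag &&
	       addr_matches(key->local_addr, local) &&
	       addr_matches(key->peer_addr, peer);
}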
diff --git a/net/mctp/test/route-test.c b/net/mctp/test/route-test.c
index 92ea4158f7fc..77e5dd422258 100644
--- a/net/mctp/test/route-test.c
+++ b/net/mctp/test/route-test.c
@@ -79,6 +79,16 @@ static void mctp_test_route_destroy(struct kunit *test,
kfree_rcu(&rt->rt, rcu);
}
+static void mctp_test_skb_set_dev(struct sk_buff *skb,
+ struct mctp_test_dev *dev)
+{
+ struct mctp_skb_cb *cb;
+
+ cb = mctp_cb(skb);
+ cb->net = READ_ONCE(dev->mdev->net);
+ skb->dev = dev->ndev;
+}
+
static struct sk_buff *mctp_test_create_skb(const struct mctp_hdr *hdr,
unsigned int data_len)
{
@@ -91,6 +101,7 @@ static struct sk_buff *mctp_test_create_skb(const struct mctp_hdr *hdr,
if (!skb)
return NULL;
+ __mctp_cb(skb);
memcpy(skb_put(skb, hdr_len), hdr, hdr_len);
buf = skb_put(skb, data_len);
@@ -111,6 +122,7 @@ static struct sk_buff *__mctp_test_create_skb_data(const struct mctp_hdr *hdr,
if (!skb)
return NULL;
+ __mctp_cb(skb);
memcpy(skb_put(skb, hdr_len), hdr, hdr_len);
memcpy(skb_put(skb, data_len), data, data_len);
@@ -249,8 +261,6 @@ static void mctp_test_rx_input(struct kunit *test)
skb = mctp_test_create_skb(&params->hdr, 1);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, skb);
- __mctp_cb(skb);
-
mctp_pkttype_receive(skb, dev->ndev, &mctp_packet_type, NULL);
KUNIT_EXPECT_EQ(test, !!rt->pkts.qlen, params->input);
@@ -283,7 +293,8 @@ KUNIT_ARRAY_PARAM(mctp_rx_input, mctp_rx_input_tests,
static void __mctp_route_test_init(struct kunit *test,
struct mctp_test_dev **devp,
struct mctp_test_route **rtp,
- struct socket **sockp)
+ struct socket **sockp,
+ unsigned int netid)
{
struct sockaddr_mctp addr = {0};
struct mctp_test_route *rt;
@@ -293,6 +304,8 @@ static void __mctp_route_test_init(struct kunit *test,
dev = mctp_test_create_dev();
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+ if (netid != MCTP_NET_ANY)
+ WRITE_ONCE(dev->mdev->net, netid);
rt = mctp_test_create_route(&init_net, dev->mdev, 8, 68);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, rt);
@@ -301,7 +314,7 @@ static void __mctp_route_test_init(struct kunit *test,
KUNIT_ASSERT_EQ(test, rc, 0);
addr.smctp_family = AF_MCTP;
- addr.smctp_network = MCTP_NET_ANY;
+ addr.smctp_network = netid;
addr.smctp_addr.s_addr = 8;
addr.smctp_type = 0;
rc = kernel_bind(sock, (struct sockaddr *)&addr, sizeof(addr));
@@ -339,13 +352,12 @@ static void mctp_test_route_input_sk(struct kunit *test)
params = test->param_value;
- __mctp_route_test_init(test, &dev, &rt, &sock);
+ __mctp_route_test_init(test, &dev, &rt, &sock, MCTP_NET_ANY);
skb = mctp_test_create_skb_data(&params->hdr, &params->type);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, skb);
- skb->dev = dev->ndev;
- __mctp_cb(skb);
+ mctp_test_skb_set_dev(skb, dev);
rc = mctp_route_input(&rt->rt, skb);
@@ -410,15 +422,14 @@ static void mctp_test_route_input_sk_reasm(struct kunit *test)
params = test->param_value;
- __mctp_route_test_init(test, &dev, &rt, &sock);
+ __mctp_route_test_init(test, &dev, &rt, &sock, MCTP_NET_ANY);
for (i = 0; i < params->n_hdrs; i++) {
c = i;
skb = mctp_test_create_skb_data(&params->hdrs[i], &c);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, skb);
- skb->dev = dev->ndev;
- __mctp_cb(skb);
+ mctp_test_skb_set_dev(skb, dev);
rc = mctp_route_input(&rt->rt, skb);
}
@@ -544,6 +555,7 @@ static void mctp_test_route_input_sk_keys(struct kunit *test)
struct mctp_sock *msk;
struct socket *sock;
unsigned long flags;
+ unsigned int net;
int rc;
u8 c;
@@ -551,6 +563,7 @@ static void mctp_test_route_input_sk_keys(struct kunit *test)
dev = mctp_test_create_dev();
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+ net = READ_ONCE(dev->mdev->net);
rt = mctp_test_create_route(&init_net, dev->mdev, 8, 68);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, rt);
@@ -562,8 +575,9 @@ static void mctp_test_route_input_sk_keys(struct kunit *test)
mns = &sock_net(sock->sk)->mctp;
/* set the incoming tag according to test params */
- key = mctp_key_alloc(msk, params->key_local_addr, params->key_peer_addr,
- params->key_tag, GFP_KERNEL);
+ key = mctp_key_alloc(msk, net, params->key_local_addr,
+ params->key_peer_addr, params->key_tag,
+ GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, key);
@@ -576,8 +590,7 @@ static void mctp_test_route_input_sk_keys(struct kunit *test)
skb = mctp_test_create_skb_data(&params->hdr, &c);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, skb);
- skb->dev = dev->ndev;
- __mctp_cb(skb);
+ mctp_test_skb_set_dev(skb, dev);
rc = mctp_route_input(&rt->rt, skb);
@@ -665,6 +678,373 @@ static void mctp_route_input_sk_keys_to_desc(
KUNIT_ARRAY_PARAM(mctp_route_input_sk_keys, mctp_route_input_sk_keys_tests,
mctp_route_input_sk_keys_to_desc);
+struct test_net {
+ unsigned int netid;
+ struct mctp_test_dev *dev;
+ struct mctp_test_route *rt;
+ struct socket *sock;
+ struct sk_buff *skb;
+ struct mctp_sk_key *key;
+ struct {
+ u8 type;
+ unsigned int data;
+ } msg;
+};
+
+static void
+mctp_test_route_input_multiple_nets_bind_init(struct kunit *test,
+ struct test_net *t)
+{
+ struct mctp_hdr hdr = RX_HDR(1, 9, 8, FL_S | FL_E | FL_T(1) | FL_TO);
+
+ t->msg.data = t->netid;
+
+ __mctp_route_test_init(test, &t->dev, &t->rt, &t->sock, t->netid);
+
+ t->skb = mctp_test_create_skb_data(&hdr, &t->msg);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, t->skb);
+ mctp_test_skb_set_dev(t->skb, t->dev);
+}
+
+static void
+mctp_test_route_input_multiple_nets_bind_fini(struct kunit *test,
+ struct test_net *t)
+{
+ __mctp_route_test_fini(test, t->dev, t->rt, t->sock);
+}
+
+/* Test that skbs from different nets (otherwise identical) get routed to their
+ * corresponding socket via the sockets' bind()
+ */
+static void mctp_test_route_input_multiple_nets_bind(struct kunit *test)
+{
+ struct sk_buff *rx_skb1, *rx_skb2;
+ struct test_net t1, t2;
+ int rc;
+
+ t1.netid = 1;
+ t2.netid = 2;
+
+ t1.msg.type = 0;
+ t2.msg.type = 0;
+
+ mctp_test_route_input_multiple_nets_bind_init(test, &t1);
+ mctp_test_route_input_multiple_nets_bind_init(test, &t2);
+
+ rc = mctp_route_input(&t1.rt->rt, t1.skb);
+ KUNIT_ASSERT_EQ(test, rc, 0);
+ rc = mctp_route_input(&t2.rt->rt, t2.skb);
+ KUNIT_ASSERT_EQ(test, rc, 0);
+
+ rx_skb1 = skb_recv_datagram(t1.sock->sk, MSG_DONTWAIT, &rc);
+ KUNIT_EXPECT_NOT_ERR_OR_NULL(test, rx_skb1);
+ KUNIT_EXPECT_EQ(test, rx_skb1->len, sizeof(t1.msg));
+ KUNIT_EXPECT_EQ(test,
+ *(unsigned int *)skb_pull(rx_skb1, sizeof(t1.msg.data)),
+ t1.netid);
+ kfree_skb(rx_skb1);
+
+ rx_skb2 = skb_recv_datagram(t2.sock->sk, MSG_DONTWAIT, &rc);
+ KUNIT_EXPECT_NOT_ERR_OR_NULL(test, rx_skb2);
+ KUNIT_EXPECT_EQ(test, rx_skb2->len, sizeof(t2.msg));
+ KUNIT_EXPECT_EQ(test,
+ *(unsigned int *)skb_pull(rx_skb2, sizeof(t2.msg.data)),
+ t2.netid);
+ kfree_skb(rx_skb2);
+
+ mctp_test_route_input_multiple_nets_bind_fini(test, &t1);
+ mctp_test_route_input_multiple_nets_bind_fini(test, &t2);
+}
+
+static void
+mctp_test_route_input_multiple_nets_key_init(struct kunit *test,
+ struct test_net *t)
+{
+ struct mctp_hdr hdr = RX_HDR(1, 9, 8, FL_S | FL_E | FL_T(1));
+ struct mctp_sock *msk;
+ struct netns_mctp *mns;
+ unsigned long flags;
+
+ t->msg.data = t->netid;
+
+ __mctp_route_test_init(test, &t->dev, &t->rt, &t->sock, t->netid);
+
+ msk = container_of(t->sock->sk, struct mctp_sock, sk);
+
+ t->key = mctp_key_alloc(msk, t->netid, hdr.dest, hdr.src, 1, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, t->key);
+
+ mns = &sock_net(t->sock->sk)->mctp;
+ spin_lock_irqsave(&mns->keys_lock, flags);
+ mctp_reserve_tag(&init_net, t->key, msk);
+ spin_unlock_irqrestore(&mns->keys_lock, flags);
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, t->key);
+ t->skb = mctp_test_create_skb_data(&hdr, &t->msg);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, t->skb);
+ mctp_test_skb_set_dev(t->skb, t->dev);
+}
+
+static void
+mctp_test_route_input_multiple_nets_key_fini(struct kunit *test,
+ struct test_net *t)
+{
+ mctp_key_unref(t->key);
+ __mctp_route_test_fini(test, t->dev, t->rt, t->sock);
+}
+
+/* Test that skbs from different nets (otherwise identical) get routed to their
+ * corresponding socket via the sk_key
+ */
+static void mctp_test_route_input_multiple_nets_key(struct kunit *test)
+{
+ struct sk_buff *rx_skb1, *rx_skb2;
+ struct test_net t1, t2;
+ int rc;
+
+ t1.netid = 1;
+ t2.netid = 2;
+
+ /* use type 1 which is not bound */
+ t1.msg.type = 1;
+ t2.msg.type = 1;
+
+ mctp_test_route_input_multiple_nets_key_init(test, &t1);
+ mctp_test_route_input_multiple_nets_key_init(test, &t2);
+
+ rc = mctp_route_input(&t1.rt->rt, t1.skb);
+ KUNIT_ASSERT_EQ(test, rc, 0);
+ rc = mctp_route_input(&t2.rt->rt, t2.skb);
+ KUNIT_ASSERT_EQ(test, rc, 0);
+
+ rx_skb1 = skb_recv_datagram(t1.sock->sk, MSG_DONTWAIT, &rc);
+ KUNIT_EXPECT_NOT_ERR_OR_NULL(test, rx_skb1);
+ KUNIT_EXPECT_EQ(test, rx_skb1->len, sizeof(t1.msg));
+ KUNIT_EXPECT_EQ(test,
+ *(unsigned int *)skb_pull(rx_skb1, sizeof(t1.msg.data)),
+ t1.netid);
+ kfree_skb(rx_skb1);
+
+ rx_skb2 = skb_recv_datagram(t2.sock->sk, MSG_DONTWAIT, &rc);
+ KUNIT_EXPECT_NOT_ERR_OR_NULL(test, rx_skb2);
+ KUNIT_EXPECT_EQ(test, rx_skb2->len, sizeof(t2.msg));
+ KUNIT_EXPECT_EQ(test,
+ *(unsigned int *)skb_pull(rx_skb2, sizeof(t2.msg.data)),
+ t2.netid);
+ kfree_skb(rx_skb2);
+
+ mctp_test_route_input_multiple_nets_key_fini(test, &t1);
+ mctp_test_route_input_multiple_nets_key_fini(test, &t2);
+}
+
+#if IS_ENABLED(CONFIG_MCTP_FLOWS)
+
+static void mctp_test_flow_init(struct kunit *test,
+ struct mctp_test_dev **devp,
+ struct mctp_test_route **rtp,
+ struct socket **sock,
+ struct sk_buff **skbp,
+ unsigned int len)
+{
+ struct mctp_test_route *rt;
+ struct mctp_test_dev *dev;
+ struct sk_buff *skb;
+
+ /* we have a slightly odd routing setup here; the test route
+ * is for EID 8, which is our local EID. We don't do a routing
+ * lookup, so that's fine - all we require is a path through
+ * mctp_local_output, which will call rt->output on whatever
+ * route we provide
+ */
+ __mctp_route_test_init(test, &dev, &rt, sock, MCTP_NET_ANY);
+
+ /* Assign a single EID. ->addrs is freed on mctp netdev release */
+ dev->mdev->addrs = kmalloc(sizeof(u8), GFP_KERNEL);
+ dev->mdev->num_addrs = 1;
+ dev->mdev->addrs[0] = 8;
+
+ skb = alloc_skb(len + sizeof(struct mctp_hdr) + 1, GFP_KERNEL);
+ KUNIT_ASSERT_TRUE(test, skb);
+ __mctp_cb(skb);
+ skb_reserve(skb, sizeof(struct mctp_hdr) + 1);
+ memset(skb_put(skb, len), 0, len);
+
+ /* take a ref for the route, we'll decrement in local output */
+ refcount_inc(&rt->rt.refs);
+
+ *devp = dev;
+ *rtp = rt;
+ *skbp = skb;
+}
+
+static void mctp_test_flow_fini(struct kunit *test,
+ struct mctp_test_dev *dev,
+ struct mctp_test_route *rt,
+ struct socket *sock)
+{
+ __mctp_route_test_fini(test, dev, rt, sock);
+}
+
+/* test that an outgoing skb has the correct MCTP extension data set */
+static void mctp_test_packet_flow(struct kunit *test)
+{
+ struct sk_buff *skb, *skb2;
+ struct mctp_test_route *rt;
+ struct mctp_test_dev *dev;
+ struct mctp_flow *flow;
+ struct socket *sock;
+ u8 dst = 8;
+ int n, rc;
+
+ mctp_test_flow_init(test, &dev, &rt, &sock, &skb, 30);
+
+ rc = mctp_local_output(sock->sk, &rt->rt, skb, dst, MCTP_TAG_OWNER);
+ KUNIT_ASSERT_EQ(test, rc, 0);
+
+ n = rt->pkts.qlen;
+ KUNIT_ASSERT_EQ(test, n, 1);
+
+ skb2 = skb_dequeue(&rt->pkts);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, skb2);
+
+ flow = skb_ext_find(skb2, SKB_EXT_MCTP);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, flow);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, flow->key);
+ KUNIT_ASSERT_PTR_EQ(test, flow->key->sk, sock->sk);
+
+ kfree_skb(skb2);
+ mctp_test_flow_fini(test, dev, rt, sock);
+}
+
+/* test that outgoing skbs, after fragmentation, all have the correct MCTP
+ * extension data set.
+ */
+static void mctp_test_fragment_flow(struct kunit *test)
+{
+ struct mctp_flow *flows[2];
+ struct sk_buff *tx_skbs[2];
+ struct mctp_test_route *rt;
+ struct mctp_test_dev *dev;
+ struct sk_buff *skb;
+ struct socket *sock;
+ u8 dst = 8;
+ int n, rc;
+
+ mctp_test_flow_init(test, &dev, &rt, &sock, &skb, 100);
+
+ rc = mctp_local_output(sock->sk, &rt->rt, skb, dst, MCTP_TAG_OWNER);
+ KUNIT_ASSERT_EQ(test, rc, 0);
+
+ n = rt->pkts.qlen;
+ KUNIT_ASSERT_EQ(test, n, 2);
+
+ /* both resulting packets should have the same flow data */
+ tx_skbs[0] = skb_dequeue(&rt->pkts);
+ tx_skbs[1] = skb_dequeue(&rt->pkts);
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, tx_skbs[0]);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, tx_skbs[1]);
+
+ flows[0] = skb_ext_find(tx_skbs[0], SKB_EXT_MCTP);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, flows[0]);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, flows[0]->key);
+ KUNIT_ASSERT_PTR_EQ(test, flows[0]->key->sk, sock->sk);
+
+ flows[1] = skb_ext_find(tx_skbs[1], SKB_EXT_MCTP);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, flows[1]);
+ KUNIT_ASSERT_PTR_EQ(test, flows[1]->key, flows[0]->key);
+
+ kfree_skb(tx_skbs[0]);
+ kfree_skb(tx_skbs[1]);
+ mctp_test_flow_fini(test, dev, rt, sock);
+}
+
+#else
+static void mctp_test_packet_flow(struct kunit *test)
+{
+ kunit_skip(test, "Requires CONFIG_MCTP_FLOWS=y");
+}
+
+static void mctp_test_fragment_flow(struct kunit *test)
+{
+ kunit_skip(test, "Requires CONFIG_MCTP_FLOWS=y");
+}
+#endif
+
+/* Test that outgoing skbs cause a suitable tag to be created */
+static void mctp_test_route_output_key_create(struct kunit *test)
+{
+ const unsigned int netid = 50;
+ const u8 dst = 26, src = 15;
+ struct mctp_test_route *rt;
+ struct mctp_test_dev *dev;
+ struct mctp_sk_key *key;
+ struct netns_mctp *mns;
+ unsigned long flags;
+ struct socket *sock;
+ struct sk_buff *skb;
+ bool empty, single;
+ const int len = 2;
+ int rc;
+
+ dev = mctp_test_create_dev();
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+ WRITE_ONCE(dev->mdev->net, netid);
+
+ rt = mctp_test_create_route(&init_net, dev->mdev, dst, 68);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, rt);
+
+ rc = sock_create_kern(&init_net, AF_MCTP, SOCK_DGRAM, 0, &sock);
+ KUNIT_ASSERT_EQ(test, rc, 0);
+
+ dev->mdev->addrs = kmalloc(sizeof(u8), GFP_KERNEL);
+ dev->mdev->num_addrs = 1;
+ dev->mdev->addrs[0] = src;
+
+ skb = alloc_skb(sizeof(struct mctp_hdr) + 1 + len, GFP_KERNEL);
+ KUNIT_ASSERT_TRUE(test, skb);
+ __mctp_cb(skb);
+ skb_reserve(skb, sizeof(struct mctp_hdr) + 1 + len);
+ memset(skb_put(skb, len), 0, len);
+
+ refcount_inc(&rt->rt.refs);
+
+ mns = &sock_net(sock->sk)->mctp;
+
+ /* We assume we're starting from an empty keys list, which requires
+ * preceding tests to clean up correctly!
+ */
+ spin_lock_irqsave(&mns->keys_lock, flags);
+ empty = hlist_empty(&mns->keys);
+ spin_unlock_irqrestore(&mns->keys_lock, flags);
+ KUNIT_ASSERT_TRUE(test, empty);
+
+ rc = mctp_local_output(sock->sk, &rt->rt, skb, dst, MCTP_TAG_OWNER);
+ KUNIT_ASSERT_EQ(test, rc, 0);
+
+ key = NULL;
+ single = false;
+ spin_lock_irqsave(&mns->keys_lock, flags);
+ if (!hlist_empty(&mns->keys)) {
+ key = hlist_entry(mns->keys.first, struct mctp_sk_key, hlist);
+ single = hlist_is_singular_node(&key->hlist, &mns->keys);
+ }
+ spin_unlock_irqrestore(&mns->keys_lock, flags);
+
+ KUNIT_ASSERT_NOT_NULL(test, key);
+ KUNIT_ASSERT_TRUE(test, single);
+
+ KUNIT_EXPECT_EQ(test, key->net, netid);
+ KUNIT_EXPECT_EQ(test, key->local_addr, src);
+ KUNIT_EXPECT_EQ(test, key->peer_addr, dst);
+ /* key has incoming tag, so inverse of what we sent */
+ KUNIT_EXPECT_FALSE(test, key->tag & MCTP_TAG_OWNER);
+
+ sock_release(sock);
+ mctp_test_route_destroy(test, rt);
+ mctp_test_destroy_dev(dev);
+}
+
static struct kunit_case mctp_test_cases[] = {
KUNIT_CASE_PARAM(mctp_test_fragment, mctp_frag_gen_params),
KUNIT_CASE_PARAM(mctp_test_rx_input, mctp_rx_input_gen_params),
@@ -673,6 +1053,11 @@ static struct kunit_case mctp_test_cases[] = {
mctp_route_input_sk_reasm_gen_params),
KUNIT_CASE_PARAM(mctp_test_route_input_sk_keys,
mctp_route_input_sk_keys_gen_params),
+ KUNIT_CASE(mctp_test_route_input_multiple_nets_bind),
+ KUNIT_CASE(mctp_test_route_input_multiple_nets_key),
+ KUNIT_CASE(mctp_test_packet_flow),
+ KUNIT_CASE(mctp_test_fragment_flow),
+ KUNIT_CASE(mctp_test_route_output_key_create),
{}
};
diff --git a/net/mctp/test/utils.c b/net/mctp/test/utils.c
index e03ba66bbe18..565763eb0211 100644
--- a/net/mctp/test/utils.c
+++ b/net/mctp/test/utils.c
@@ -4,6 +4,7 @@
#include <linux/mctp.h>
#include <linux/if_arp.h>
+#include <net/mctp.h>
#include <net/mctpdevice.h>
#include <net/pkt_sched.h>
@@ -54,6 +55,7 @@ struct mctp_test_dev *mctp_test_create_dev(void)
rcu_read_lock();
dev->mdev = __mctp_dev_get(ndev);
+ dev->mdev->net = mctp_default_net(dev_net(ndev));
rcu_read_unlock();
return dev;
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index 1af29af65388..6dab883a08dd 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -2179,7 +2179,9 @@ static int mpls_dump_routes(struct sk_buff *skb, struct netlink_callback *cb)
const struct nlmsghdr *nlh = cb->nlh;
struct net *net = sock_net(skb->sk);
struct mpls_route __rcu **platform_label;
- struct fib_dump_filter filter = {};
+ struct fib_dump_filter filter = {
+ .rtnl_held = true,
+ };
unsigned int flags = NLM_F_MULTI;
size_t platform_labels;
unsigned int index;
diff --git a/net/mpls/mpls_gso.c b/net/mpls/mpls_gso.c
index 533d082f0701..45d1e6a157fc 100644
--- a/net/mpls/mpls_gso.c
+++ b/net/mpls/mpls_gso.c
@@ -27,6 +27,9 @@ static struct sk_buff *mpls_gso_segment(struct sk_buff *skb,
__be16 mpls_protocol;
unsigned int mpls_hlen;
+ if (!skb_inner_network_header_was_set(skb))
+ goto out;
+
skb_reset_network_header(skb);
mpls_hlen = skb_inner_network_header(skb) - skb_network_header(skb);
if (unlikely(!mpls_hlen || mpls_hlen % MPLS_HLEN))
diff --git a/net/mpls/mpls_iptunnel.c b/net/mpls/mpls_iptunnel.c
index ef59e25dc482..8fc790f2a01b 100644
--- a/net/mpls/mpls_iptunnel.c
+++ b/net/mpls/mpls_iptunnel.c
@@ -55,8 +55,6 @@ static int mpls_xmit(struct sk_buff *skb)
out_dev = dst->dev;
net = dev_net(out_dev);
- skb_orphan(skb);
-
if (!mpls_output_possible(out_dev) ||
!dst->lwtstate || skb_warn_if_lro(skb))
goto drop;
diff --git a/net/mptcp/diag.c b/net/mptcp/diag.c
index 7017dd60659d..3ae46b545d2c 100644
--- a/net/mptcp/diag.c
+++ b/net/mptcp/diag.c
@@ -10,7 +10,6 @@
#include <linux/net.h>
#include <linux/inet_diag.h>
#include <net/netlink.h>
-#include <uapi/linux/mptcp.h>
#include "protocol.h"
static int subflow_get_info(struct sock *sk, struct sk_buff *skb)
diff --git a/net/mptcp/mptcp_diag.c b/net/mptcp/mptcp_diag.c
index 5409c2ea3f57..0566dd793810 100644
--- a/net/mptcp/mptcp_diag.c
+++ b/net/mptcp/mptcp_diag.c
@@ -10,7 +10,6 @@
#include <linux/net.h>
#include <linux/inet_diag.h>
#include <net/netlink.h>
-#include <uapi/linux/mptcp.h>
#include "protocol.h"
static int sk_diag_dump(struct sock *sk, struct sk_buff *skb,
@@ -225,6 +224,7 @@ static void mptcp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
}
static const struct inet_diag_handler mptcp_diag_handler = {
+ .owner = THIS_MODULE,
.dump = mptcp_diag_dump,
.dump_one = mptcp_diag_dump_one,
.idiag_get_info = mptcp_diag_get_info,
diff --git a/net/mptcp/mptcp_pm_gen.c b/net/mptcp/mptcp_pm_gen.c
index 670da7822e6c..c30a2a90a192 100644
--- a/net/mptcp/mptcp_pm_gen.c
+++ b/net/mptcp/mptcp_pm_gen.c
@@ -32,8 +32,9 @@ const struct nla_policy mptcp_pm_del_addr_nl_policy[MPTCP_PM_ENDPOINT_ADDR + 1]
};
/* MPTCP_PM_CMD_GET_ADDR - do */
-const struct nla_policy mptcp_pm_get_addr_nl_policy[MPTCP_PM_ENDPOINT_ADDR + 1] = {
- [MPTCP_PM_ENDPOINT_ADDR] = NLA_POLICY_NESTED(mptcp_pm_address_nl_policy),
+const struct nla_policy mptcp_pm_get_addr_nl_policy[MPTCP_PM_ATTR_TOKEN + 1] = {
+ [MPTCP_PM_ATTR_ADDR] = NLA_POLICY_NESTED(mptcp_pm_address_nl_policy),
+ [MPTCP_PM_ATTR_TOKEN] = { .type = NLA_U32, },
};
/* MPTCP_PM_CMD_FLUSH_ADDRS - do */
@@ -110,7 +111,7 @@ const struct genl_ops mptcp_pm_nl_ops[11] = {
.doit = mptcp_pm_nl_get_addr_doit,
.dumpit = mptcp_pm_nl_get_addr_dumpit,
.policy = mptcp_pm_get_addr_nl_policy,
- .maxattr = MPTCP_PM_ENDPOINT_ADDR,
+ .maxattr = MPTCP_PM_ATTR_TOKEN,
.flags = GENL_UNS_ADMIN_PERM,
},
{
diff --git a/net/mptcp/mptcp_pm_gen.h b/net/mptcp/mptcp_pm_gen.h
index ac9fc7225b6a..e24258f6f819 100644
--- a/net/mptcp/mptcp_pm_gen.h
+++ b/net/mptcp/mptcp_pm_gen.h
@@ -18,7 +18,7 @@ extern const struct nla_policy mptcp_pm_add_addr_nl_policy[MPTCP_PM_ENDPOINT_ADD
extern const struct nla_policy mptcp_pm_del_addr_nl_policy[MPTCP_PM_ENDPOINT_ADDR + 1];
-extern const struct nla_policy mptcp_pm_get_addr_nl_policy[MPTCP_PM_ENDPOINT_ADDR + 1];
+extern const struct nla_policy mptcp_pm_get_addr_nl_policy[MPTCP_PM_ATTR_TOKEN + 1];
extern const struct nla_policy mptcp_pm_flush_addrs_nl_policy[MPTCP_PM_ENDPOINT_ADDR + 1];
diff --git a/net/mptcp/options.c b/net/mptcp/options.c
index 63fc0758c22d..27ca42c77b02 100644
--- a/net/mptcp/options.c
+++ b/net/mptcp/options.c
@@ -689,8 +689,8 @@ static bool mptcp_established_options_add_addr(struct sock *sk, struct sk_buff *
opts->suboptions |= OPTION_MPTCP_ADD_ADDR;
if (!echo) {
MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_ADDADDRTX);
- opts->ahmac = add_addr_generate_hmac(msk->local_key,
- msk->remote_key,
+ opts->ahmac = add_addr_generate_hmac(READ_ONCE(msk->local_key),
+ READ_ONCE(msk->remote_key),
&opts->addr);
} else {
MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_ECHOADDTX);
@@ -792,7 +792,7 @@ static bool mptcp_established_options_fastclose(struct sock *sk,
*size = TCPOLEN_MPTCP_FASTCLOSE;
opts->suboptions |= OPTION_MPTCP_FASTCLOSE;
- opts->rcvr_key = msk->remote_key;
+ opts->rcvr_key = READ_ONCE(msk->remote_key);
pr_debug("FASTCLOSE key=%llu", opts->rcvr_key);
MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPFASTCLOSETX);
@@ -1031,7 +1031,7 @@ u64 __mptcp_expand_seq(u64 old_seq, u64 cur_seq)
static void __mptcp_snd_una_update(struct mptcp_sock *msk, u64 new_snd_una)
{
msk->bytes_acked += new_snd_una - msk->snd_una;
- msk->snd_una = new_snd_una;
+ WRITE_ONCE(msk->snd_una, new_snd_una);
}
static void ack_update_msk(struct mptcp_sock *msk,
@@ -1058,10 +1058,10 @@ static void ack_update_msk(struct mptcp_sock *msk,
new_wnd_end = new_snd_una + tcp_sk(ssk)->snd_wnd;
if (after64(new_wnd_end, msk->wnd_end))
- msk->wnd_end = new_wnd_end;
+ WRITE_ONCE(msk->wnd_end, new_wnd_end);
/* this assumes mptcp_incoming_options() is invoked after tcp_ack() */
- if (after64(msk->wnd_end, READ_ONCE(msk->snd_nxt)))
+ if (after64(msk->wnd_end, snd_nxt))
__mptcp_check_push(sk, ssk);
if (after64(new_snd_una, old_snd_una)) {
@@ -1072,7 +1072,7 @@ static void ack_update_msk(struct mptcp_sock *msk,
trace_ack_update_msk(mp_opt->data_ack,
old_snd_una, new_snd_una,
- new_wnd_end, msk->wnd_end);
+ new_wnd_end, READ_ONCE(msk->wnd_end));
}
bool mptcp_update_rcv_data_fin(struct mptcp_sock *msk, u64 data_fin_seq, bool use_64bit)
@@ -1100,8 +1100,8 @@ static bool add_addr_hmac_valid(struct mptcp_sock *msk,
if (mp_opt->echo)
return true;
- hmac = add_addr_generate_hmac(msk->remote_key,
- msk->local_key,
+ hmac = add_addr_generate_hmac(READ_ONCE(msk->remote_key),
+ READ_ONCE(msk->local_key),
&mp_opt->addr);
pr_debug("msk=%p, ahmac=%llu, mp_opt->ahmac=%llu\n",
@@ -1148,7 +1148,7 @@ bool mptcp_incoming_options(struct sock *sk, struct sk_buff *skb)
if (unlikely(mp_opt.suboptions != OPTION_MPTCP_DSS)) {
if ((mp_opt.suboptions & OPTION_MPTCP_FASTCLOSE) &&
- msk->local_key == mp_opt.rcvr_key) {
+ READ_ONCE(msk->local_key) == mp_opt.rcvr_key) {
WRITE_ONCE(msk->rcv_fastclose, true);
mptcp_schedule_work((struct sock *)msk);
MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPFASTCLOSERX);
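[editor note] The READ_ONCE()/WRITE_ONCE() conversions above follow the usual pairing rule: once any reader accesses a field outside the lock that serializes its writers, every write becomes WRITE_ONCE() and every lockless read READ_ONCE(), so the compiler can neither tear the access nor re-load it. A generic sketch; the field and lock names are illustrative:

struct state {
	spinlock_t lock;
	u64 snd_una;		/* written under lock, also read locklessly */
};

static void state_advance(struct state *s, u64 new_una)
{
	spin_lock_bh(&s->lock);
	WRITE_ONCE(s->snd_una, new_una);	/* untorn store */
	spin_unlock_bh(&s->lock);
}

static u64 state_peek(const struct state *s)
{
	/* lockless: may be slightly stale, but never torn */
	return READ_ONCE(s->snd_una);
}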
diff --git a/net/mptcp/pm.c b/net/mptcp/pm.c
index 4ae19113b8eb..55406720c607 100644
--- a/net/mptcp/pm.c
+++ b/net/mptcp/pm.c
@@ -6,7 +6,6 @@
#define pr_fmt(fmt) "MPTCP: " fmt
#include <linux/kernel.h>
-#include <net/tcp.h>
#include <net/mptcp.h>
#include "protocol.h"
@@ -77,7 +76,7 @@ void mptcp_pm_new_connection(struct mptcp_sock *msk, const struct sock *ssk, int
{
struct mptcp_pm_data *pm = &msk->pm;
- pr_debug("msk=%p, token=%u side=%d", msk, msk->token, server_side);
+ pr_debug("msk=%p, token=%u side=%d", msk, READ_ONCE(msk->token), server_side);
WRITE_ONCE(pm->server_side, server_side);
mptcp_event(MPTCP_EVENT_CREATED, msk, ssk, GFP_ATOMIC);
@@ -441,13 +440,27 @@ int mptcp_pm_get_flags_and_ifindex_by_id(struct mptcp_sock *msk, unsigned int id
return mptcp_pm_nl_get_flags_and_ifindex_by_id(msk, id, flags, ifindex);
}
-int mptcp_pm_set_flags(struct net *net, struct nlattr *token,
- struct mptcp_pm_addr_entry *loc,
- struct mptcp_pm_addr_entry *rem, u8 bkup)
+int mptcp_pm_get_addr(struct sk_buff *skb, struct genl_info *info)
{
- if (token)
- return mptcp_userspace_pm_set_flags(net, token, loc, rem, bkup);
- return mptcp_pm_nl_set_flags(net, loc, bkup);
+ if (info->attrs[MPTCP_PM_ATTR_TOKEN])
+ return mptcp_userspace_pm_get_addr(skb, info);
+ return mptcp_pm_nl_get_addr(skb, info);
+}
+
+int mptcp_pm_dump_addr(struct sk_buff *msg, struct netlink_callback *cb)
+{
+ const struct genl_info *info = genl_info_dump(cb);
+
+ if (info->attrs[MPTCP_PM_ATTR_TOKEN])
+ return mptcp_userspace_pm_dump_addr(msg, cb);
+ return mptcp_pm_nl_dump_addr(msg, cb);
+}
+
+int mptcp_pm_set_flags(struct sk_buff *skb, struct genl_info *info)
+{
+ if (info->attrs[MPTCP_PM_ATTR_TOKEN])
+ return mptcp_userspace_pm_set_flags(skb, info);
+ return mptcp_pm_nl_set_flags(skb, info);
}
void mptcp_pm_subflow_chk_stale(const struct mptcp_sock *msk, struct sock *ssk)
diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
index 58d17d9604e7..5c17d39146ea 100644
--- a/net/mptcp/pm_netlink.c
+++ b/net/mptcp/pm_netlink.c
@@ -8,19 +8,13 @@
#include <linux/inet.h>
#include <linux/kernel.h>
-#include <net/tcp.h>
#include <net/inet_common.h>
#include <net/netns/generic.h>
#include <net/mptcp.h>
-#include <net/genetlink.h>
-#include <uapi/linux/mptcp.h>
#include "protocol.h"
#include "mib.h"
-/* forward declaration */
-static struct genl_family mptcp_genl_family;
-
static int pm_nl_pernet_id;
struct mptcp_pm_add_entry {
@@ -505,15 +499,12 @@ __lookup_addr_by_id(struct pm_nl_pernet *pernet, unsigned int id)
}
static struct mptcp_pm_addr_entry *
-__lookup_addr(struct pm_nl_pernet *pernet, const struct mptcp_addr_info *info,
- bool lookup_by_id)
+__lookup_addr(struct pm_nl_pernet *pernet, const struct mptcp_addr_info *info)
{
struct mptcp_pm_addr_entry *entry;
list_for_each_entry(entry, &pernet->local_addr_list, list) {
- if ((!lookup_by_id &&
- mptcp_addresses_equal(&entry->addr, info, entry->addr.port)) ||
- (lookup_by_id && entry->addr.id == info->id))
+ if (mptcp_addresses_equal(&entry->addr, info, entry->addr.port))
return entry;
}
return NULL;
@@ -543,7 +534,7 @@ static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk)
mptcp_local_address((struct sock_common *)msk->first, &mpc_addr);
rcu_read_lock();
- entry = __lookup_addr(pernet, &mpc_addr, false);
+ entry = __lookup_addr(pernet, &mpc_addr);
if (entry) {
__clear_bit(entry->addr.id, msk->pm.id_avail_bitmap);
msk->mpc_endpoint_id = entry->addr.id;
@@ -1550,8 +1541,8 @@ void mptcp_pm_remove_addrs(struct mptcp_sock *msk, struct list_head *rm_list)
}
}
-void mptcp_pm_remove_addrs_and_subflows(struct mptcp_sock *msk,
- struct list_head *rm_list)
+static void mptcp_pm_remove_addrs_and_subflows(struct mptcp_sock *msk,
+ struct list_head *rm_list)
{
struct mptcp_rm_list alist = { .nr = 0 }, slist = { .nr = 0 };
struct mptcp_pm_addr_entry *entry;
@@ -1636,8 +1627,8 @@ int mptcp_pm_nl_flush_addrs_doit(struct sk_buff *skb, struct genl_info *info)
return 0;
}
-static int mptcp_nl_fill_addr(struct sk_buff *skb,
- struct mptcp_pm_addr_entry *entry)
+int mptcp_nl_fill_addr(struct sk_buff *skb,
+ struct mptcp_pm_addr_entry *entry)
{
struct mptcp_addr_info *addr = &entry->addr;
struct nlattr *attr;
@@ -1675,7 +1666,7 @@ nla_put_failure:
return -EMSGSIZE;
}
-int mptcp_pm_nl_get_addr_doit(struct sk_buff *skb, struct genl_info *info)
+int mptcp_pm_nl_get_addr(struct sk_buff *skb, struct genl_info *info)
{
struct nlattr *attr = info->attrs[MPTCP_PM_ENDPOINT_ADDR];
struct pm_nl_pernet *pernet = genl_info_pm_nl(info);
@@ -1725,8 +1716,13 @@ fail:
return ret;
}
-int mptcp_pm_nl_get_addr_dumpit(struct sk_buff *msg,
- struct netlink_callback *cb)
+int mptcp_pm_nl_get_addr_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ return mptcp_pm_get_addr(skb, info);
+}
+
+int mptcp_pm_nl_dump_addr(struct sk_buff *msg,
+ struct netlink_callback *cb)
{
struct net *net = sock_net(msg->sk);
struct mptcp_pm_addr_entry *entry;
@@ -1768,6 +1764,12 @@ int mptcp_pm_nl_get_addr_dumpit(struct sk_buff *msg,
return msg->len;
}
+int mptcp_pm_nl_get_addr_dumpit(struct sk_buff *msg,
+ struct netlink_callback *cb)
+{
+ return mptcp_pm_dump_addr(msg, cb);
+}
+
static int parse_limit(struct genl_info *info, int id, unsigned int *limit)
{
struct nlattr *attr = info->attrs[id];
@@ -1882,66 +1884,63 @@ next:
return ret;
}
-int mptcp_pm_nl_set_flags(struct net *net, struct mptcp_pm_addr_entry *addr, u8 bkup)
+int mptcp_pm_nl_set_flags(struct sk_buff *skb, struct genl_info *info)
{
- struct pm_nl_pernet *pernet = pm_nl_get_pernet(net);
+ struct mptcp_pm_addr_entry addr = { .addr = { .family = AF_UNSPEC }, };
+ struct nlattr *attr = info->attrs[MPTCP_PM_ATTR_ADDR];
u8 changed, mask = MPTCP_PM_ADDR_FLAG_BACKUP |
MPTCP_PM_ADDR_FLAG_FULLMESH;
+ struct net *net = sock_net(skb->sk);
struct mptcp_pm_addr_entry *entry;
+ struct pm_nl_pernet *pernet;
u8 lookup_by_id = 0;
+ u8 bkup = 0;
+ int ret;
+
+ pernet = pm_nl_get_pernet(net);
+
+ ret = mptcp_pm_parse_entry(attr, info, false, &addr);
+ if (ret < 0)
+ return ret;
- if (addr->addr.family == AF_UNSPEC) {
+ if (addr.addr.family == AF_UNSPEC) {
lookup_by_id = 1;
- if (!addr->addr.id)
+ if (!addr.addr.id) {
+ GENL_SET_ERR_MSG(info, "missing required inputs");
return -EOPNOTSUPP;
+ }
}
+ if (addr.flags & MPTCP_PM_ADDR_FLAG_BACKUP)
+ bkup = 1;
+
spin_lock_bh(&pernet->lock);
- entry = __lookup_addr(pernet, &addr->addr, lookup_by_id);
+ entry = lookup_by_id ? __lookup_addr_by_id(pernet, addr.addr.id) :
+ __lookup_addr(pernet, &addr.addr);
if (!entry) {
spin_unlock_bh(&pernet->lock);
+ GENL_SET_ERR_MSG(info, "address not found");
return -EINVAL;
}
- if ((addr->flags & MPTCP_PM_ADDR_FLAG_FULLMESH) &&
+ if ((addr.flags & MPTCP_PM_ADDR_FLAG_FULLMESH) &&
(entry->flags & MPTCP_PM_ADDR_FLAG_SIGNAL)) {
spin_unlock_bh(&pernet->lock);
+ GENL_SET_ERR_MSG(info, "invalid addr flags");
return -EINVAL;
}
- changed = (addr->flags ^ entry->flags) & mask;
- entry->flags = (entry->flags & ~mask) | (addr->flags & mask);
- *addr = *entry;
+ changed = (addr.flags ^ entry->flags) & mask;
+ entry->flags = (entry->flags & ~mask) | (addr.flags & mask);
+ addr = *entry;
spin_unlock_bh(&pernet->lock);
- mptcp_nl_set_flags(net, &addr->addr, bkup, changed);
+ mptcp_nl_set_flags(net, &addr.addr, bkup, changed);
return 0;
}
int mptcp_pm_nl_set_flags_doit(struct sk_buff *skb, struct genl_info *info)
{
- struct mptcp_pm_addr_entry remote = { .addr = { .family = AF_UNSPEC }, };
- struct mptcp_pm_addr_entry addr = { .addr = { .family = AF_UNSPEC }, };
- struct nlattr *attr_rem = info->attrs[MPTCP_PM_ATTR_ADDR_REMOTE];
- struct nlattr *token = info->attrs[MPTCP_PM_ATTR_TOKEN];
- struct nlattr *attr = info->attrs[MPTCP_PM_ATTR_ADDR];
- struct net *net = sock_net(skb->sk);
- u8 bkup = 0;
- int ret;
-
- ret = mptcp_pm_parse_entry(attr, info, false, &addr);
- if (ret < 0)
- return ret;
-
- if (attr_rem) {
- ret = mptcp_pm_parse_entry(attr_rem, info, false, &remote);
- if (ret < 0)
- return ret;
- }
-
- if (addr.flags & MPTCP_PM_ADDR_FLAG_BACKUP)
- bkup = 1;
-
- return mptcp_pm_set_flags(net, token, &addr, &remote, bkup);
+ return mptcp_pm_set_flags(skb, info);
}
static void mptcp_nl_mcast_send(struct net *net, struct sk_buff *nlskb, gfp_t gfp)
@@ -2014,7 +2013,7 @@ static int mptcp_event_put_token_and_ssk(struct sk_buff *skb,
const struct mptcp_subflow_context *sf;
u8 sk_err;
- if (nla_put_u32(skb, MPTCP_ATTR_TOKEN, msk->token))
+ if (nla_put_u32(skb, MPTCP_ATTR_TOKEN, READ_ONCE(msk->token)))
return -EMSGSIZE;
if (mptcp_event_add_subflow(skb, ssk))
@@ -2072,7 +2071,7 @@ static int mptcp_event_created(struct sk_buff *skb,
const struct mptcp_sock *msk,
const struct sock *ssk)
{
- int err = nla_put_u32(skb, MPTCP_ATTR_TOKEN, msk->token);
+ int err = nla_put_u32(skb, MPTCP_ATTR_TOKEN, READ_ONCE(msk->token));
if (err)
return err;
@@ -2100,7 +2099,7 @@ void mptcp_event_addr_removed(const struct mptcp_sock *msk, uint8_t id)
if (!nlh)
goto nla_put_failure;
- if (nla_put_u32(skb, MPTCP_ATTR_TOKEN, msk->token))
+ if (nla_put_u32(skb, MPTCP_ATTR_TOKEN, READ_ONCE(msk->token)))
goto nla_put_failure;
if (nla_put_u8(skb, MPTCP_ATTR_REM_ID, id))
@@ -2135,7 +2134,7 @@ void mptcp_event_addr_announced(const struct sock *ssk,
if (!nlh)
goto nla_put_failure;
- if (nla_put_u32(skb, MPTCP_ATTR_TOKEN, msk->token))
+ if (nla_put_u32(skb, MPTCP_ATTR_TOKEN, READ_ONCE(msk->token)))
goto nla_put_failure;
if (nla_put_u8(skb, MPTCP_ATTR_REM_ID, info->id))
@@ -2251,7 +2250,7 @@ void mptcp_event(enum mptcp_event_type type, const struct mptcp_sock *msk,
goto nla_put_failure;
break;
case MPTCP_EVENT_CLOSED:
- if (nla_put_u32(skb, MPTCP_ATTR_TOKEN, msk->token) < 0)
+ if (nla_put_u32(skb, MPTCP_ATTR_TOKEN, READ_ONCE(msk->token)) < 0)
goto nla_put_failure;
break;
case MPTCP_EVENT_ANNOUNCED:
@@ -2281,7 +2280,7 @@ nla_put_failure:
nlmsg_free(skb);
}
-static struct genl_family mptcp_genl_family __ro_after_init = {
+struct genl_family mptcp_genl_family __ro_after_init = {
.name = MPTCP_PM_NAME,
.version = MPTCP_PM_VER,
.netnsok = true,
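[editor note] The error paths in this file now attach extack strings via GENL_SET_ERR_MSG() and NL_SET_ERR_MSG_ATTR(), so userspace gets a human-readable reason (and, for attribute errors, a cursor to the offending attribute) rather than a bare errno. The shape of the pattern in a generic doit handler, with a hypothetical attribute id:

static int my_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr *attr = info->attrs[MY_ATTR_TOKEN];	/* hypothetical */

	if (!attr) {
		GENL_SET_ERR_MSG(info, "missing token attribute");
		return -EINVAL;
	}

	if (!nla_get_u32(attr)) {
		/* ties the message to the bad attribute in the ack */
		NL_SET_ERR_MSG_ATTR(info->extack, attr, "invalid token");
		return -EINVAL;
	}

	return 0;
}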
diff --git a/net/mptcp/pm_userspace.c b/net/mptcp/pm_userspace.c
index bc97cc30f013..9f5d422d5ef6 100644
--- a/net/mptcp/pm_userspace.c
+++ b/net/mptcp/pm_userspace.c
@@ -106,19 +106,26 @@ static int mptcp_userspace_pm_delete_local_addr(struct mptcp_sock *msk,
return -EINVAL;
}
+static struct mptcp_pm_addr_entry *
+mptcp_userspace_pm_lookup_addr_by_id(struct mptcp_sock *msk, unsigned int id)
+{
+ struct mptcp_pm_addr_entry *entry;
+
+ list_for_each_entry(entry, &msk->pm.userspace_pm_local_addr_list, list) {
+ if (entry->addr.id == id)
+ return entry;
+ }
+ return NULL;
+}
+
int mptcp_userspace_pm_get_flags_and_ifindex_by_id(struct mptcp_sock *msk,
unsigned int id,
u8 *flags, int *ifindex)
{
- struct mptcp_pm_addr_entry *entry, *match = NULL;
+ struct mptcp_pm_addr_entry *match;
spin_lock_bh(&msk->pm.lock);
- list_for_each_entry(entry, &msk->pm.userspace_pm_local_addr_list, list) {
- if (id == entry->addr.id) {
- match = entry;
- break;
- }
- }
+ match = mptcp_userspace_pm_lookup_addr_by_id(msk, id);
spin_unlock_bh(&msk->pm.lock);
if (match) {
*flags = match->flags;
@@ -261,7 +268,7 @@ int mptcp_pm_nl_remove_doit(struct sk_buff *skb, struct genl_info *info)
{
struct nlattr *token = info->attrs[MPTCP_PM_ATTR_TOKEN];
struct nlattr *id = info->attrs[MPTCP_PM_ATTR_LOC_ID];
- struct mptcp_pm_addr_entry *match = NULL;
+ struct mptcp_pm_addr_entry *match;
struct mptcp_pm_addr_entry *entry;
struct mptcp_sock *msk;
LIST_HEAD(free_list);
@@ -298,13 +305,7 @@ int mptcp_pm_nl_remove_doit(struct sk_buff *skb, struct genl_info *info)
lock_sock(sk);
- list_for_each_entry(entry, &msk->pm.userspace_pm_local_addr_list, list) {
- if (entry->addr.id == id_val) {
- match = entry;
- break;
- }
- }
-
+ match = mptcp_userspace_pm_lookup_addr_by_id(msk, id_val);
if (!match) {
GENL_SET_ERR_MSG(info, "address with specified id not found");
release_sock(sk);
@@ -334,7 +335,6 @@ int mptcp_pm_nl_subflow_create_doit(struct sk_buff *skb, struct genl_info *info)
struct nlattr *laddr = info->attrs[MPTCP_PM_ATTR_ADDR];
struct mptcp_pm_addr_entry local = { 0 };
struct mptcp_addr_info addr_r;
- struct mptcp_addr_info addr_l;
struct mptcp_sock *msk;
int err = -EINVAL;
struct sock *sk;
@@ -360,25 +360,31 @@ int mptcp_pm_nl_subflow_create_doit(struct sk_buff *skb, struct genl_info *info)
goto create_err;
}
- err = mptcp_pm_parse_addr(laddr, info, &addr_l);
+ err = mptcp_pm_parse_entry(laddr, info, true, &local);
if (err < 0) {
NL_SET_ERR_MSG_ATTR(info->extack, laddr, "error parsing local addr");
goto create_err;
}
+ if (local.flags & MPTCP_PM_ADDR_FLAG_SIGNAL) {
+ GENL_SET_ERR_MSG(info, "invalid addr flags");
+ err = -EINVAL;
+ goto create_err;
+ }
+ local.flags |= MPTCP_PM_ADDR_FLAG_SUBFLOW;
+
err = mptcp_pm_parse_addr(raddr, info, &addr_r);
if (err < 0) {
NL_SET_ERR_MSG_ATTR(info->extack, raddr, "error parsing remote addr");
goto create_err;
}
- if (!mptcp_pm_addr_families_match(sk, &addr_l, &addr_r)) {
+ if (!mptcp_pm_addr_families_match(sk, &local.addr, &addr_r)) {
GENL_SET_ERR_MSG(info, "families mismatch");
err = -EINVAL;
goto create_err;
}
- local.addr = addr_l;
err = mptcp_userspace_pm_append_new_local_addr(msk, &local, false);
if (err < 0) {
GENL_SET_ERR_MSG(info, "did not match address and id");
@@ -387,7 +393,7 @@ int mptcp_pm_nl_subflow_create_doit(struct sk_buff *skb, struct genl_info *info)
lock_sock(sk);
- err = __mptcp_subflow_connect(sk, &addr_l, &addr_r);
+ err = __mptcp_subflow_connect(sk, &local.addr, &addr_r);
release_sock(sk);
@@ -540,35 +546,194 @@ destroy_err:
return err;
}
-int mptcp_userspace_pm_set_flags(struct net *net, struct nlattr *token,
- struct mptcp_pm_addr_entry *loc,
- struct mptcp_pm_addr_entry *rem, u8 bkup)
+int mptcp_userspace_pm_set_flags(struct sk_buff *skb, struct genl_info *info)
{
+ struct mptcp_pm_addr_entry loc = { .addr = { .family = AF_UNSPEC }, };
+ struct mptcp_pm_addr_entry rem = { .addr = { .family = AF_UNSPEC }, };
+ struct nlattr *attr_rem = info->attrs[MPTCP_PM_ATTR_ADDR_REMOTE];
+ struct nlattr *token = info->attrs[MPTCP_PM_ATTR_TOKEN];
+ struct nlattr *attr = info->attrs[MPTCP_PM_ATTR_ADDR];
+ struct net *net = sock_net(skb->sk);
struct mptcp_sock *msk;
int ret = -EINVAL;
struct sock *sk;
u32 token_val;
+ u8 bkup = 0;
token_val = nla_get_u32(token);
msk = mptcp_token_get_sock(net, token_val);
- if (!msk)
+ if (!msk) {
+ NL_SET_ERR_MSG_ATTR(info->extack, token, "invalid token");
return ret;
+ }
sk = (struct sock *)msk;
- if (!mptcp_pm_is_userspace(msk))
+ if (!mptcp_pm_is_userspace(msk)) {
+ GENL_SET_ERR_MSG(info, "userspace PM not selected");
goto set_flags_err;
+ }
+
+ ret = mptcp_pm_parse_entry(attr, info, false, &loc);
+ if (ret < 0)
+ goto set_flags_err;
+
+ if (attr_rem) {
+ ret = mptcp_pm_parse_entry(attr_rem, info, false, &rem);
+ if (ret < 0)
+ goto set_flags_err;
+ }
- if (loc->addr.family == AF_UNSPEC ||
- rem->addr.family == AF_UNSPEC)
+ if (loc.addr.family == AF_UNSPEC ||
+ rem.addr.family == AF_UNSPEC) {
+ GENL_SET_ERR_MSG(info, "invalid address families");
+ ret = -EINVAL;
goto set_flags_err;
+ }
+
+ if (loc.flags & MPTCP_PM_ADDR_FLAG_BACKUP)
+ bkup = 1;
lock_sock(sk);
- ret = mptcp_pm_nl_mp_prio_send_ack(msk, &loc->addr, &rem->addr, bkup);
+ ret = mptcp_pm_nl_mp_prio_send_ack(msk, &loc.addr, &rem.addr, bkup);
release_sock(sk);
set_flags_err:
sock_put(sk);
return ret;
}
+
+int mptcp_userspace_pm_dump_addr(struct sk_buff *msg,
+ struct netlink_callback *cb)
+{
+ struct id_bitmap {
+ DECLARE_BITMAP(map, MPTCP_PM_MAX_ADDR_ID + 1);
+ } *bitmap;
+ const struct genl_info *info = genl_info_dump(cb);
+ struct net *net = sock_net(msg->sk);
+ struct mptcp_pm_addr_entry *entry;
+ struct mptcp_sock *msk;
+ struct nlattr *token;
+ int ret = -EINVAL;
+ struct sock *sk;
+ void *hdr;
+
+ bitmap = (struct id_bitmap *)cb->ctx;
+ token = info->attrs[MPTCP_PM_ATTR_TOKEN];
+
+ msk = mptcp_token_get_sock(net, nla_get_u32(token));
+ if (!msk) {
+ NL_SET_ERR_MSG_ATTR(info->extack, token, "invalid token");
+ return ret;
+ }
+
+ sk = (struct sock *)msk;
+
+ if (!mptcp_pm_is_userspace(msk)) {
+ GENL_SET_ERR_MSG(info, "invalid request; userspace PM not selected");
+ goto out;
+ }
+
+ lock_sock(sk);
+ spin_lock_bh(&msk->pm.lock);
+ list_for_each_entry(entry, &msk->pm.userspace_pm_local_addr_list, list) {
+ if (test_bit(entry->addr.id, bitmap->map))
+ continue;
+
+ hdr = genlmsg_put(msg, NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, &mptcp_genl_family,
+ NLM_F_MULTI, MPTCP_PM_CMD_GET_ADDR);
+ if (!hdr)
+ break;
+
+ if (mptcp_nl_fill_addr(msg, entry) < 0) {
+ genlmsg_cancel(msg, hdr);
+ break;
+ }
+
+ __set_bit(entry->addr.id, bitmap->map);
+ genlmsg_end(msg, hdr);
+ }
+ spin_unlock_bh(&msk->pm.lock);
+ release_sock(sk);
+ ret = msg->len;
+
+out:
+ sock_put(sk);
+ return ret;
+}
+
+int mptcp_userspace_pm_get_addr(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct nlattr *attr = info->attrs[MPTCP_PM_ENDPOINT_ADDR];
+ struct nlattr *token = info->attrs[MPTCP_PM_ATTR_TOKEN];
+ struct mptcp_pm_addr_entry addr, *entry;
+ struct net *net = sock_net(skb->sk);
+ struct mptcp_sock *msk;
+ struct sk_buff *msg;
+ int ret = -EINVAL;
+ struct sock *sk;
+ void *reply;
+
+ msk = mptcp_token_get_sock(net, nla_get_u32(token));
+ if (!msk) {
+ NL_SET_ERR_MSG_ATTR(info->extack, token, "invalid token");
+ return ret;
+ }
+
+ sk = (struct sock *)msk;
+
+ if (!mptcp_pm_is_userspace(msk)) {
+ GENL_SET_ERR_MSG(info, "invalid request; userspace PM not selected");
+ goto out;
+ }
+
+ ret = mptcp_pm_parse_entry(attr, info, false, &addr);
+ if (ret < 0)
+ goto out;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ reply = genlmsg_put_reply(msg, info, &mptcp_genl_family, 0,
+ info->genlhdr->cmd);
+ if (!reply) {
+ GENL_SET_ERR_MSG(info, "not enough space in Netlink message");
+ ret = -EMSGSIZE;
+ goto fail;
+ }
+
+ lock_sock(sk);
+ spin_lock_bh(&msk->pm.lock);
+ entry = mptcp_userspace_pm_lookup_addr_by_id(msk, addr.addr.id);
+ if (!entry) {
+ GENL_SET_ERR_MSG(info, "address not found");
+ ret = -EINVAL;
+ goto unlock_fail;
+ }
+
+ ret = mptcp_nl_fill_addr(msg, entry);
+ if (ret)
+ goto unlock_fail;
+
+ genlmsg_end(msg, reply);
+ ret = genlmsg_reply(msg, info);
+ spin_unlock_bh(&msk->pm.lock);
+ release_sock(sk);
+ sock_put(sk);
+ return ret;
+
+unlock_fail:
+ spin_unlock_bh(&msk->pm.lock);
+ release_sock(sk);
+fail:
+ nlmsg_free(msg);
+out:
+ sock_put(sk);
+ return ret;
+}
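[editor note] mctp_userspace_pm_dump_addr() above uses the standard resumable-dump idiom: per-dump state lives in cb->ctx (here a bitmap of already-emitted address ids), so when netlink re-invokes the dumpit callback after the buffer fills, entries sent on an earlier pass are skipped instead of duplicated. A condensed sketch of the idiom; family wiring and the per-item genlmsg put are elided:

struct dump_ctx {
	DECLARE_BITMAP(done, 256);	/* must fit within cb->ctx */
};

static int my_dumpit(struct sk_buff *msg, struct netlink_callback *cb)
{
	struct dump_ctx *ctx = (struct dump_ctx *)cb->ctx;
	int id;

	for (id = 0; id < 256; id++) {
		if (test_bit(id, ctx->done))
			continue;		/* sent on an earlier pass */
		/* ... genlmsg_put() one entry for 'id'; break if full ... */
		__set_bit(id, ctx->done);
	}

	return msg->len;	/* > 0: call again; 0: dump complete */
}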
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 7833a49f6214..3a1967bc7bad 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -15,7 +15,6 @@
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/protocol.h>
-#include <net/tcp.h>
#include <net/tcp_states.h>
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
#include <net/transp_v6.h>
@@ -410,6 +409,7 @@ static void mptcp_close_wake_up(struct sock *sk)
sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
}
+/* called under the msk socket lock */
static bool mptcp_pending_data_fin_ack(struct sock *sk)
{
struct mptcp_sock *msk = mptcp_sk(sk);
@@ -441,16 +441,17 @@ static void mptcp_check_data_fin_ack(struct sock *sk)
}
}
+/* can be called with no lock acquired */
static bool mptcp_pending_data_fin(struct sock *sk, u64 *seq)
{
struct mptcp_sock *msk = mptcp_sk(sk);
if (READ_ONCE(msk->rcv_data_fin) &&
- ((1 << sk->sk_state) &
+ ((1 << inet_sk_state_load(sk)) &
(TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2))) {
u64 rcv_data_fin_seq = READ_ONCE(msk->rcv_data_fin_seq);
- if (msk->ack_seq == rcv_data_fin_seq) {
+ if (READ_ONCE(msk->ack_seq) == rcv_data_fin_seq) {
if (seq)
*seq = rcv_data_fin_seq;
@@ -748,7 +749,7 @@ static bool __mptcp_ofo_queue(struct mptcp_sock *msk)
__skb_queue_tail(&sk->sk_receive_queue, skb);
}
msk->bytes_received += end_seq - msk->ack_seq;
- msk->ack_seq = end_seq;
+ WRITE_ONCE(msk->ack_seq, end_seq);
moved = true;
}
return moved;
@@ -985,6 +986,7 @@ static void dfrag_clear(struct sock *sk, struct mptcp_data_frag *dfrag)
put_page(dfrag->page);
}
+/* called under both the msk socket lock and the data lock */
static void __mptcp_clean_una(struct sock *sk)
{
struct mptcp_sock *msk = mptcp_sk(sk);
@@ -1033,13 +1035,15 @@ static void __mptcp_clean_una(struct sock *sk)
msk->recovery = false;
out:
- if (snd_una == READ_ONCE(msk->snd_nxt) &&
- snd_una == READ_ONCE(msk->write_seq)) {
+ if (snd_una == msk->snd_nxt && snd_una == msk->write_seq) {
if (mptcp_rtx_timer_pending(sk) && !mptcp_data_fin_enabled(msk))
mptcp_stop_rtx_timer(sk);
} else {
mptcp_reset_rtx_timer(sk);
}
+
+ if (mptcp_pending_data_fin_ack(sk))
+ mptcp_schedule_work(sk);
}
static void __mptcp_clean_una_wakeup(struct sock *sk)
@@ -1500,7 +1504,7 @@ static void mptcp_update_post_push(struct mptcp_sock *msk,
*/
if (likely(after64(snd_nxt_new, msk->snd_nxt))) {
msk->bytes_sent += snd_nxt_new - msk->snd_nxt;
- msk->snd_nxt = snd_nxt_new;
+ WRITE_ONCE(msk->snd_nxt, snd_nxt_new);
}
}
@@ -1687,15 +1691,6 @@ out:
}
}
-static void mptcp_set_nospace(struct sock *sk)
-{
- /* enable autotune */
- set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
-
- /* will be cleared on avail space */
- set_bit(MPTCP_NOSPACE, &mptcp_sk(sk)->flags);
-}
-
static int mptcp_disconnect(struct sock *sk, int flags);
static int mptcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
@@ -1766,6 +1761,30 @@ static int do_copy_data_nocache(struct sock *sk, int copy,
return 0;
}
+/* open-code sk_stream_memory_free() plus the sent-limit computation to
+ * avoid indirect calls in the fast path.
+ * Called under the msk socket lock, so we can avoid a bunch of ONCE
+ * annotations.
+ */
+static u32 mptcp_send_limit(const struct sock *sk)
+{
+ const struct mptcp_sock *msk = mptcp_sk(sk);
+ u32 limit, not_sent;
+
+ if (sk->sk_wmem_queued >= READ_ONCE(sk->sk_sndbuf))
+ return 0;
+
+ limit = mptcp_notsent_lowat(sk);
+ if (limit == UINT_MAX)
+ return UINT_MAX;
+
+ not_sent = msk->write_seq - msk->snd_nxt;
+ if (not_sent >= limit)
+ return 0;
+
+ return limit - not_sent;
+}
+
static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
struct mptcp_sock *msk = mptcp_sk(sk);
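
As a worked example of mptcp_send_limit() (numbers are illustrative): with the send buffer not exhausted, a notsent_lowat of 16 KiB and 10 KiB already queued but unsent, sendmsg() may copy at most 6 KiB more before falling into wait_for_memory. A userspace model of the same computation:

    #include <limits.h>

    /* limit == notsent_lowat, not_sent == write_seq - snd_nxt */
    static unsigned int send_limit(unsigned int limit, unsigned int not_sent)
    {
            if (limit == UINT_MAX)          /* lowat disabled */
                    return UINT_MAX;
            return not_sent >= limit ? 0 : limit - not_sent;
    }

    /* send_limit(16384, 10240) == 6144, send_limit(16384, 16384) == 0 */
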
@@ -1810,6 +1829,12 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
struct mptcp_data_frag *dfrag;
bool dfrag_collapsed;
size_t psize, offset;
+ u32 copy_limit;
+
+ /* ensure we stay within the notsent_lowat() constraint */
+ copy_limit = mptcp_send_limit(sk);
+ if (!copy_limit)
+ goto wait_for_memory;
/* reuse tail pfrag, if possible, or carve a new one from the
* page allocator
@@ -1817,9 +1842,6 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
dfrag = mptcp_pending_tail(sk);
dfrag_collapsed = mptcp_frag_can_collapse_to(msk, pfrag, dfrag);
if (!dfrag_collapsed) {
- if (!sk_stream_memory_free(sk))
- goto wait_for_memory;
-
if (!mptcp_page_frag_refill(sk, pfrag))
goto wait_for_memory;
@@ -1834,6 +1856,7 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
offset = dfrag->offset + dfrag->data_len;
psize = pfrag->size - offset;
psize = min_t(size_t, psize, msg_data_left(msg));
+ psize = min_t(size_t, psize, copy_limit);
total_ts = psize + frag_truesize;
if (!sk_wmem_schedule(sk, total_ts))
@@ -1869,7 +1892,7 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
continue;
wait_for_memory:
- mptcp_set_nospace(sk);
+ set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
__mptcp_push_pending(sk, msg->msg_flags);
ret = sk_stream_wait_memory(sk, &timeo);
if (ret)
@@ -2115,7 +2138,7 @@ static unsigned int mptcp_inq_hint(const struct sock *sk)
skb = skb_peek(&msk->receive_queue);
if (skb) {
- u64 hint_val = msk->ack_seq - MPTCP_SKB_CB(skb)->map_seq;
+ u64 hint_val = READ_ONCE(msk->ack_seq) - MPTCP_SKB_CB(skb)->map_seq;
if (hint_val >= INT_MAX)
return INT_MAX;
@@ -2759,7 +2782,7 @@ static void __mptcp_init_sock(struct sock *sk)
__skb_queue_head_init(&msk->receive_queue);
msk->out_of_order_queue = RB_ROOT;
msk->first_pending = NULL;
- msk->rmem_fwd_alloc = 0;
+ WRITE_ONCE(msk->rmem_fwd_alloc, 0);
WRITE_ONCE(msk->rmem_released, 0);
msk->timer_ival = TCP_RTO_MIN;
msk->scaling_ratio = TCP_DEFAULT_SCALING_RATIO;
@@ -2975,7 +2998,7 @@ static void __mptcp_destroy_sock(struct sock *sk)
sk->sk_prot->destroy(sk);
- WARN_ON_ONCE(msk->rmem_fwd_alloc);
+ WARN_ON_ONCE(READ_ONCE(msk->rmem_fwd_alloc));
WARN_ON_ONCE(msk->rmem_released);
sk_stream_kill_queues(sk);
xfrm_sk_free_policy(sk);
@@ -3150,16 +3173,16 @@ static int mptcp_disconnect(struct sock *sk, int flags)
WRITE_ONCE(msk->flags, 0);
msk->cb_flags = 0;
msk->recovery = false;
- msk->can_ack = false;
- msk->fully_established = false;
- msk->rcv_data_fin = false;
- msk->snd_data_fin_enable = false;
- msk->rcv_fastclose = false;
- msk->use_64bit_ack = false;
- msk->bytes_consumed = 0;
+ WRITE_ONCE(msk->can_ack, false);
+ WRITE_ONCE(msk->fully_established, false);
+ WRITE_ONCE(msk->rcv_data_fin, false);
+ WRITE_ONCE(msk->snd_data_fin_enable, false);
+ WRITE_ONCE(msk->rcv_fastclose, false);
+ WRITE_ONCE(msk->use_64bit_ack, false);
WRITE_ONCE(msk->csum_enabled, mptcp_is_checksum_enabled(sock_net(sk)));
mptcp_pm_data_reset(msk);
mptcp_ca_reset(sk);
+ msk->bytes_consumed = 0;
msk->bytes_acked = 0;
msk->bytes_received = 0;
msk->bytes_sent = 0;
@@ -3250,17 +3273,17 @@ struct sock *mptcp_sk_clone_init(const struct sock *sk,
mptcp_copy_ip_options(nsk, sk);
msk = mptcp_sk(nsk);
- msk->local_key = subflow_req->local_key;
- msk->token = subflow_req->token;
+ WRITE_ONCE(msk->local_key, subflow_req->local_key);
+ WRITE_ONCE(msk->token, subflow_req->token);
msk->in_accept_queue = 1;
WRITE_ONCE(msk->fully_established, false);
if (mp_opt->suboptions & OPTION_MPTCP_CSUMREQD)
WRITE_ONCE(msk->csum_enabled, true);
- msk->write_seq = subflow_req->idsn + 1;
- msk->snd_nxt = msk->write_seq;
- msk->snd_una = msk->write_seq;
- msk->wnd_end = msk->snd_nxt + tcp_sk(ssk)->snd_wnd;
+ WRITE_ONCE(msk->write_seq, subflow_req->idsn + 1);
+ WRITE_ONCE(msk->snd_nxt, msk->write_seq);
+ WRITE_ONCE(msk->snd_una, msk->write_seq);
+ WRITE_ONCE(msk->wnd_end, msk->snd_nxt + tcp_sk(ssk)->snd_wnd);
msk->setsockopt_seq = mptcp_sk(sk)->setsockopt_seq;
mptcp_init_sched(msk, mptcp_sk(sk)->sched);
@@ -3363,9 +3386,6 @@ void __mptcp_data_acked(struct sock *sk)
__mptcp_clean_una(sk);
else
__set_bit(MPTCP_CLEAN_UNA, &mptcp_sk(sk)->cb_flags);
-
- if (mptcp_pending_data_fin_ack(sk))
- mptcp_schedule_work(sk);
}
void __mptcp_check_push(struct sock *sk, struct sock *ssk)
@@ -3767,6 +3787,7 @@ static struct proto mptcp_prot = {
.unhash = mptcp_unhash,
.get_port = mptcp_get_port,
.forward_alloc_get = mptcp_forward_alloc_get,
+ .stream_memory_free = mptcp_stream_memory_free,
.sockets_allocated = &mptcp_sockets_allocated,
.memory_allocated = &tcp_memory_allocated,
@@ -3940,12 +3961,12 @@ static __poll_t mptcp_check_writeable(struct mptcp_sock *msk)
{
struct sock *sk = (struct sock *)msk;
- if (sk_stream_is_writeable(sk))
+ if (__mptcp_stream_is_writeable(sk, 1))
return EPOLLOUT | EPOLLWRNORM;
- mptcp_set_nospace(sk);
- smp_mb__after_atomic(); /* msk->flags is changed by write_space cb */
- if (sk_stream_is_writeable(sk))
+ set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+ smp_mb__after_atomic(); /* NOSPACE is changed by mptcp_write_space() */
+ if (__mptcp_stream_is_writeable(sk, 1))
return EPOLLOUT | EPOLLWRNORM;
return 0;
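
mptcp_check_writeable() keeps the classic check/set-flag/re-check shape because dropping the second check would reintroduce a lost-wakeup race; the interleaving it closes, sketched as comments:

    /*
     *  poller                          TX completion path
     *  ------                          ------------------
     *  sees no space
     *                                  frees space
     *                                  SOCK_NOSPACE not yet set -> no wakeup
     *  sets SOCK_NOSPACE
     *  sleeps forever
     *
     * re-checking writeability after set_bit() + smp_mb__after_atomic()
     * observes the freed space and returns EPOLLOUT instead of sleeping.
     */
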
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index 07f6242afc1a..a10ebf3ee10a 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -113,10 +113,9 @@
#define MPTCP_RST_TRANSIENT BIT(0)
/* MPTCP socket atomic flags */
-#define MPTCP_NOSPACE 1
-#define MPTCP_WORK_RTX 2
-#define MPTCP_FALLBACK_DONE 4
-#define MPTCP_WORK_CLOSE_SUBFLOW 5
+#define MPTCP_WORK_RTX 1
+#define MPTCP_FALLBACK_DONE 2
+#define MPTCP_WORK_CLOSE_SUBFLOW 3
/* MPTCP socket release cb flags */
#define MPTCP_PUSH_PENDING 1
@@ -260,8 +259,10 @@ struct mptcp_data_frag {
struct mptcp_sock {
/* inet_connection_sock must be the first member */
struct inet_connection_sock sk;
- u64 local_key;
- u64 remote_key;
+ u64 local_key; /* protected by the first subflow socket lock
* lockless read access
+ */
+ u64 remote_key; /* same as above */
u64 write_seq;
u64 bytes_sent;
u64 snd_nxt;
@@ -306,6 +307,7 @@ struct mptcp_sock {
in_accept_queue:1,
free_first:1,
rcvspace_init:1;
+ u32 notsent_lowat;
struct work_struct work;
struct sk_buff *ooo_last_skb;
struct rb_root out_of_order_queue;
@@ -341,12 +343,30 @@ struct mptcp_sock {
#define mptcp_for_each_subflow_safe(__msk, __subflow, __tmp) \
list_for_each_entry_safe(__subflow, __tmp, &((__msk)->conn_list), node)
+extern struct genl_family mptcp_genl_family;
+
static inline void msk_owned_by_me(const struct mptcp_sock *msk)
{
sock_owned_by_me((const struct sock *)msk);
}
+#ifdef CONFIG_DEBUG_NET
+/* MPTCP-specific: we might (indirectly) call this helper with the wrong sk */
+#undef tcp_sk
+#define tcp_sk(ptr) ({ \
+ typeof(ptr) _ptr = (ptr); \
+ WARN_ON(_ptr->sk_protocol != IPPROTO_TCP); \
+ container_of_const(_ptr, struct tcp_sock, inet_conn.icsk_inet.sk); \
+})
+#define mptcp_sk(ptr) ({ \
+ typeof(ptr) _ptr = (ptr); \
+ WARN_ON(_ptr->sk_protocol != IPPROTO_MPTCP); \
+ container_of_const(_ptr, struct mptcp_sock, sk.icsk_inet.sk); \
+})
+
+#else /* !CONFIG_DEBUG_NET */
#define mptcp_sk(ptr) container_of_const(ptr, struct mptcp_sock, sk.icsk_inet.sk)
+#endif
/* the msk socket doesn't use the backlog; also account for the bulk
* free memory
@@ -400,7 +420,7 @@ static inline struct mptcp_data_frag *mptcp_rtx_head(struct sock *sk)
{
struct mptcp_sock *msk = mptcp_sk(sk);
- if (msk->snd_una == READ_ONCE(msk->snd_nxt))
+ if (msk->snd_una == msk->snd_nxt)
return NULL;
return list_first_entry_or_null(&msk->rtx_queue, struct mptcp_data_frag, list);
@@ -790,14 +810,36 @@ static inline bool mptcp_data_fin_enabled(const struct mptcp_sock *msk)
READ_ONCE(msk->write_seq) == READ_ONCE(msk->snd_nxt);
}
+static inline u32 mptcp_notsent_lowat(const struct sock *sk)
+{
+ struct net *net = sock_net(sk);
+ u32 val;
+
+ val = READ_ONCE(mptcp_sk(sk)->notsent_lowat);
+ return val ?: READ_ONCE(net->ipv4.sysctl_tcp_notsent_lowat);
+}
+
+static inline bool mptcp_stream_memory_free(const struct sock *sk, int wake)
+{
+ const struct mptcp_sock *msk = mptcp_sk(sk);
+ u32 notsent_bytes;
+
+ notsent_bytes = READ_ONCE(msk->write_seq) - READ_ONCE(msk->snd_nxt);
+ return (notsent_bytes << wake) < mptcp_notsent_lowat(sk);
+}
+
+static inline bool __mptcp_stream_is_writeable(const struct sock *sk, int wake)
+{
+ return mptcp_stream_memory_free(sk, wake) &&
+ __sk_stream_is_writeable(sk, wake);
+}
+
static inline void mptcp_write_space(struct sock *sk)
{
- if (sk_stream_is_writeable(sk)) {
- /* pairs with memory barrier in mptcp_poll */
- smp_mb();
- if (test_and_clear_bit(MPTCP_NOSPACE, &mptcp_sk(sk)->flags))
- sk_stream_write_space(sk);
- }
+ /* pairs with memory barrier in mptcp_poll */
+ smp_mb();
+ if (mptcp_stream_memory_free(sk, 1))
+ sk_stream_write_space(sk);
}
static inline void __mptcp_sync_sndbuf(struct sock *sk)
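
The wake argument biases the threshold the same way tcp_stream_memory_free() does: the unsent byte count is shifted left before the comparison, so with wake == 1 a poller is only reported writeable once the queue drains below half the lowat, giving wakeups some hysteresis. With a 16 KiB lowat:

    /* (notsent_bytes << 1) compared against lowat == 16384 */
    notsent_bytes == 10240  ->  20480 >= 16384  ->  not writeable
    notsent_bytes ==  6144  ->  12288 <  16384  ->  writeable
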
@@ -808,7 +850,7 @@ static inline void __mptcp_sync_sndbuf(struct sock *sk)
if (sk->sk_userlocks & SOCK_SNDBUF_LOCK)
return;
- new_sndbuf = sock_net(sk)->ipv4.sysctl_tcp_wmem[0];
+ new_sndbuf = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_wmem[0]);
mptcp_for_each_subflow(mptcp_sk(sk), subflow) {
ssk_sndbuf = READ_ONCE(mptcp_subflow_tcp_sock(subflow)->sk_sndbuf);
@@ -928,21 +970,15 @@ int mptcp_pm_nl_get_flags_and_ifindex_by_id(struct mptcp_sock *msk, unsigned int
int mptcp_userspace_pm_get_flags_and_ifindex_by_id(struct mptcp_sock *msk,
unsigned int id,
u8 *flags, int *ifindex);
-int mptcp_pm_set_flags(struct net *net, struct nlattr *token,
- struct mptcp_pm_addr_entry *loc,
- struct mptcp_pm_addr_entry *rem, u8 bkup);
-int mptcp_pm_nl_set_flags(struct net *net, struct mptcp_pm_addr_entry *addr, u8 bkup);
-int mptcp_userspace_pm_set_flags(struct net *net, struct nlattr *token,
- struct mptcp_pm_addr_entry *loc,
- struct mptcp_pm_addr_entry *rem, u8 bkup);
+int mptcp_pm_set_flags(struct sk_buff *skb, struct genl_info *info);
+int mptcp_pm_nl_set_flags(struct sk_buff *skb, struct genl_info *info);
+int mptcp_userspace_pm_set_flags(struct sk_buff *skb, struct genl_info *info);
int mptcp_pm_announce_addr(struct mptcp_sock *msk,
const struct mptcp_addr_info *addr,
bool echo);
int mptcp_pm_remove_addr(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list);
int mptcp_pm_remove_subflow(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list);
void mptcp_pm_remove_addrs(struct mptcp_sock *msk, struct list_head *rm_list);
-void mptcp_pm_remove_addrs_and_subflows(struct mptcp_sock *msk,
- struct list_head *rm_list);
void mptcp_free_local_addr_list(struct mptcp_sock *msk);
@@ -958,6 +994,8 @@ void __mptcp_fastopen_gen_msk_ackseq(struct mptcp_sock *msk, struct mptcp_subflo
const struct mptcp_options_received *mp_opt);
void mptcp_fastopen_subflow_synack_set_params(struct mptcp_subflow_context *subflow,
struct request_sock *req);
+int mptcp_nl_fill_addr(struct sk_buff *skb,
+ struct mptcp_pm_addr_entry *entry);
static inline bool mptcp_pm_should_add_signal(struct mptcp_sock *msk)
{
@@ -1022,6 +1060,15 @@ bool mptcp_pm_rm_addr_signal(struct mptcp_sock *msk, unsigned int remaining,
int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc);
int mptcp_pm_nl_get_local_id(struct mptcp_sock *msk, struct mptcp_addr_info *skc);
int mptcp_userspace_pm_get_local_id(struct mptcp_sock *msk, struct mptcp_addr_info *skc);
+int mptcp_pm_dump_addr(struct sk_buff *msg, struct netlink_callback *cb);
+int mptcp_pm_nl_dump_addr(struct sk_buff *msg,
+ struct netlink_callback *cb);
+int mptcp_userspace_pm_dump_addr(struct sk_buff *msg,
+ struct netlink_callback *cb);
+int mptcp_pm_get_addr(struct sk_buff *skb, struct genl_info *info);
+int mptcp_pm_nl_get_addr(struct sk_buff *skb, struct genl_info *info);
+int mptcp_userspace_pm_get_addr(struct sk_buff *skb,
+ struct genl_info *info);
static inline u8 subflow_get_local_id(const struct mptcp_subflow_context *subflow)
{
diff --git a/net/mptcp/sockopt.c b/net/mptcp/sockopt.c
index c40f1428e602..dcd1c76d2a3b 100644
--- a/net/mptcp/sockopt.c
+++ b/net/mptcp/sockopt.c
@@ -624,20 +624,11 @@ static int mptcp_setsockopt_sol_tcp_congestion(struct mptcp_sock *msk, sockptr_t
return ret;
}
-static int mptcp_setsockopt_sol_tcp_cork(struct mptcp_sock *msk, sockptr_t optval,
- unsigned int optlen)
+static int __mptcp_setsockopt_sol_tcp_cork(struct mptcp_sock *msk, int val)
{
struct mptcp_subflow_context *subflow;
struct sock *sk = (struct sock *)msk;
- int val;
- if (optlen < sizeof(int))
- return -EINVAL;
-
- if (copy_from_sockptr(&val, optval, sizeof(val)))
- return -EFAULT;
-
- lock_sock(sk);
sockopt_seq_inc(msk);
msk->cork = !!val;
mptcp_for_each_subflow(msk, subflow) {
@@ -649,25 +640,15 @@ static int mptcp_setsockopt_sol_tcp_cork(struct mptcp_sock *msk, sockptr_t optva
}
if (!val)
mptcp_check_and_set_pending(sk);
- release_sock(sk);
return 0;
}
-static int mptcp_setsockopt_sol_tcp_nodelay(struct mptcp_sock *msk, sockptr_t optval,
- unsigned int optlen)
+static int __mptcp_setsockopt_sol_tcp_nodelay(struct mptcp_sock *msk, int val)
{
struct mptcp_subflow_context *subflow;
struct sock *sk = (struct sock *)msk;
- int val;
-
- if (optlen < sizeof(int))
- return -EINVAL;
-
- if (copy_from_sockptr(&val, optval, sizeof(val)))
- return -EFAULT;
- lock_sock(sk);
sockopt_seq_inc(msk);
msk->nodelay = !!val;
mptcp_for_each_subflow(msk, subflow) {
@@ -679,8 +660,6 @@ static int mptcp_setsockopt_sol_tcp_nodelay(struct mptcp_sock *msk, sockptr_t op
}
if (val)
mptcp_check_and_set_pending(sk);
- release_sock(sk);
-
return 0;
}
@@ -803,25 +782,10 @@ static int mptcp_setsockopt_sol_tcp(struct mptcp_sock *msk, int optname,
int ret, val;
switch (optname) {
- case TCP_INQ:
- ret = mptcp_get_int_option(msk, optval, optlen, &val);
- if (ret)
- return ret;
- if (val < 0 || val > 1)
- return -EINVAL;
-
- lock_sock(sk);
- msk->recvmsg_inq = !!val;
- release_sock(sk);
- return 0;
case TCP_ULP:
return -EOPNOTSUPP;
case TCP_CONGESTION:
return mptcp_setsockopt_sol_tcp_congestion(msk, optval, optlen);
- case TCP_CORK:
- return mptcp_setsockopt_sol_tcp_cork(msk, optval, optlen);
- case TCP_NODELAY:
- return mptcp_setsockopt_sol_tcp_nodelay(msk, optval, optlen);
case TCP_DEFER_ACCEPT:
/* See tcp.c: TCP_DEFER_ACCEPT does not fail */
mptcp_setsockopt_first_sf_only(msk, SOL_TCP, optname, optval, optlen);
@@ -834,7 +798,34 @@ static int mptcp_setsockopt_sol_tcp(struct mptcp_sock *msk, int optname,
optval, optlen);
}
- return -EOPNOTSUPP;
+ ret = mptcp_get_int_option(msk, optval, optlen, &val);
+ if (ret)
+ return ret;
+
+ lock_sock(sk);
+ switch (optname) {
+ case TCP_INQ:
+ if (val < 0 || val > 1)
+ ret = -EINVAL;
+ else
+ msk->recvmsg_inq = !!val;
+ break;
+ case TCP_NOTSENT_LOWAT:
+ WRITE_ONCE(msk->notsent_lowat, val);
+ mptcp_write_space(sk);
+ break;
+ case TCP_CORK:
+ ret = __mptcp_setsockopt_sol_tcp_cork(msk, val);
+ break;
+ case TCP_NODELAY:
+ ret = __mptcp_setsockopt_sol_tcp_nodelay(msk, val);
+ break;
+ default:
+ ret = -ENOPROTOOPT;
+ }
+
+ release_sock(sk);
+ return ret;
}
int mptcp_setsockopt(struct sock *sk, int level, int optname,
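
With the consolidated integer path above, TCP_NOTSENT_LOWAT becomes a plain int option on MPTCP sockets, readable back via the getsockopt hunk further down. A minimal userspace sketch (error handling omitted; assumes a uapi recent enough to define IPPROTO_MPTCP):

    #include <netinet/in.h>
    #include <netinet/tcp.h>
    #include <sys/socket.h>

    int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_MPTCP);
    int lowat = 16 * 1024;          /* cap queued-but-unsent data at ~16 KiB */
    setsockopt(fd, SOL_TCP, TCP_NOTSENT_LOWAT, &lowat, sizeof(lowat));

    socklen_t optlen = sizeof(lowat);
    getsockopt(fd, SOL_TCP, TCP_NOTSENT_LOWAT, &lowat, &optlen);
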
@@ -942,7 +933,7 @@ void mptcp_diag_fill_info(struct mptcp_sock *msk, struct mptcp_info *info)
mptcp_data_unlock(sk);
slow = lock_sock_fast(sk);
- info->mptcpi_csum_enabled = msk->csum_enabled;
+ info->mptcpi_csum_enabled = READ_ONCE(msk->csum_enabled);
info->mptcpi_token = msk->token;
info->mptcpi_write_seq = msk->write_seq;
info->mptcpi_retransmits = inet_csk(sk)->icsk_retransmits;
@@ -1349,6 +1340,8 @@ static int mptcp_getsockopt_sol_tcp(struct mptcp_sock *msk, int optname,
return mptcp_put_int_option(msk, optval, optlen, msk->cork);
case TCP_NODELAY:
return mptcp_put_int_option(msk, optval, optlen, msk->nodelay);
+ case TCP_NOTSENT_LOWAT:
+ return mptcp_put_int_option(msk, optval, optlen, msk->notsent_lowat);
}
return -EOPNOTSUPP;
}
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index 71ba86246ff8..1626dd20c68f 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -15,13 +15,11 @@
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/protocol.h>
-#include <net/tcp.h>
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
#include <net/ip6_route.h>
#include <net/transp_v6.h>
#endif
#include <net/mptcp.h>
-#include <uapi/linux/mptcp.h>
#include "protocol.h"
#include "mib.h"
@@ -75,7 +73,8 @@ static void subflow_req_create_thmac(struct mptcp_subflow_request_sock *subflow_
get_random_bytes(&subflow_req->local_nonce, sizeof(u32));
- subflow_generate_hmac(msk->local_key, msk->remote_key,
+ subflow_generate_hmac(READ_ONCE(msk->local_key),
+ READ_ONCE(msk->remote_key),
subflow_req->local_nonce,
subflow_req->remote_nonce, hmac);
@@ -714,7 +713,8 @@ static bool subflow_hmac_valid(const struct request_sock *req,
if (!msk)
return false;
- subflow_generate_hmac(msk->remote_key, msk->local_key,
+ subflow_generate_hmac(READ_ONCE(msk->remote_key),
+ READ_ONCE(msk->local_key),
subflow_req->remote_nonce,
subflow_req->local_nonce, hmac);
@@ -1548,8 +1548,8 @@ int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
mptcp_pm_get_flags_and_ifindex_by_id(msk, local_id,
&flags, &ifindex);
subflow->remote_key_valid = 1;
- subflow->remote_key = msk->remote_key;
- subflow->local_key = msk->local_key;
+ subflow->remote_key = READ_ONCE(msk->remote_key);
+ subflow->local_key = READ_ONCE(msk->local_key);
subflow->token = msk->token;
mptcp_info2sockaddr(loc, &addr, ssk->sk_family);
diff --git a/net/mptcp/token_test.c b/net/mptcp/token_test.c
index bfff53e668da..4fc39fa2e262 100644
--- a/net/mptcp/token_test.c
+++ b/net/mptcp/token_test.c
@@ -52,14 +52,19 @@ static struct mptcp_subflow_context *build_ctx(struct kunit *test)
static struct mptcp_sock *build_msk(struct kunit *test)
{
struct mptcp_sock *msk;
+ struct sock *sk;
msk = kunit_kzalloc(test, sizeof(struct mptcp_sock), GFP_USER);
KUNIT_EXPECT_NOT_ERR_OR_NULL(test, msk);
refcount_set(&((struct sock *)msk)->sk_refcnt, 1);
sock_net_set((struct sock *)msk, &init_net);
+ sk = (struct sock *)msk;
+
/* be sure the token helpers can dereference sk->sk_prot */
- ((struct sock *)msk)->sk_prot = &tcp_prot;
+ sk->sk_prot = &tcp_prot;
+ sk->sk_protocol = IPPROTO_MPTCP;
+
return msk;
}
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 441d1f134110..df2dc21304ef 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -818,7 +818,7 @@ config NETFILTER_XT_TARGET_AUDIT
config NETFILTER_XT_TARGET_CHECKSUM
tristate "CHECKSUM target support"
- depends on IP_NF_MANGLE || IP6_NF_MANGLE
+ depends on IP_NF_MANGLE || IP6_NF_MANGLE || NFT_COMPAT
depends on NETFILTER_ADVANCED
help
This option adds a `CHECKSUM' target, which can be used in the iptables mangle
@@ -869,7 +869,7 @@ config NETFILTER_XT_TARGET_CONNSECMARK
config NETFILTER_XT_TARGET_CT
tristate '"CT" target support'
depends on NF_CONNTRACK
- depends on IP_NF_RAW || IP6_NF_RAW
+ depends on IP_NF_RAW || IP6_NF_RAW || NFT_COMPAT
depends on NETFILTER_ADVANCED
help
This option adds a `CT' target, which allows you to specify initial
@@ -880,7 +880,7 @@ config NETFILTER_XT_TARGET_CT
config NETFILTER_XT_TARGET_DSCP
tristate '"DSCP" and "TOS" target support'
- depends on IP_NF_MANGLE || IP6_NF_MANGLE
+ depends on IP_NF_MANGLE || IP6_NF_MANGLE || NFT_COMPAT
depends on NETFILTER_ADVANCED
help
This option adds a `DSCP' target, which allows you to manipulate
@@ -896,7 +896,7 @@ config NETFILTER_XT_TARGET_DSCP
config NETFILTER_XT_TARGET_HL
tristate '"HL" hoplimit target support'
- depends on IP_NF_MANGLE || IP6_NF_MANGLE
+ depends on IP_NF_MANGLE || IP6_NF_MANGLE || NFT_COMPAT
depends on NETFILTER_ADVANCED
help
This option adds the "HL" (for IPv6) and "TTL" (for IPv4)
@@ -1080,7 +1080,7 @@ config NETFILTER_XT_TARGET_TPROXY
depends on NETFILTER_ADVANCED
depends on IPV6 || IPV6=n
depends on IP6_NF_IPTABLES || IP6_NF_IPTABLES=n
- depends on IP_NF_MANGLE
+ depends on IP_NF_MANGLE || NFT_COMPAT
select NF_DEFRAG_IPV4
select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES != n
select NF_TPROXY_IPV4
@@ -1147,7 +1147,7 @@ config NETFILTER_XT_TARGET_TCPMSS
config NETFILTER_XT_TARGET_TCPOPTSTRIP
tristate '"TCPOPTSTRIP" target support'
- depends on IP_NF_MANGLE || IP6_NF_MANGLE
+ depends on IP_NF_MANGLE || IP6_NF_MANGLE || NFT_COMPAT
depends on NETFILTER_ADVANCED
help
This option adds a "TCPOPTSTRIP" target, which allows you to strip
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index a743db073887..98d7dbe3d787 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -1511,9 +1511,7 @@ int __init ip_vs_conn_init(void)
return -ENOMEM;
/* Allocate ip_vs_conn slab cache */
- ip_vs_conn_cachep = kmem_cache_create("ip_vs_conn",
- sizeof(struct ip_vs_conn), 0,
- SLAB_HWCACHE_ALIGN, NULL);
+ ip_vs_conn_cachep = KMEM_CACHE(ip_vs_conn, SLAB_HWCACHE_ALIGN);
if (!ip_vs_conn_cachep) {
kvfree(ip_vs_conn_tab);
return -ENOMEM;
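
The KMEM_CACHE() conversions in this batch are mechanical: the macro derives the cache name, object size and alignment from the struct itself, the only functional delta being that __alignof__ is now passed where the open-coded calls passed 0. Its definition is roughly (include/linux/slab.h):

    #define KMEM_CACHE(__struct, __flags)                                  \
            kmem_cache_create(#__struct, sizeof(struct __struct),          \
                              __alignof__(struct __struct), (__flags), NULL)
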
diff --git a/net/netfilter/nf_bpf_link.c b/net/netfilter/nf_bpf_link.c
index 0e4beae421f8..5257d5e7eb09 100644
--- a/net/netfilter/nf_bpf_link.c
+++ b/net/netfilter/nf_bpf_link.c
@@ -314,7 +314,7 @@ static bool nf_is_valid_access(int off, int size, enum bpf_access_type type,
static const struct bpf_func_proto *
bpf_nf_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
- return bpf_base_func_proto(func_id);
+ return bpf_base_func_proto(func_id, prog);
}
const struct bpf_verifier_ops netfilter_verifier_ops = {
diff --git a/net/netfilter/nf_conncount.c b/net/netfilter/nf_conncount.c
index 5d8ed6c90b7e..8715617b02fe 100644
--- a/net/netfilter/nf_conncount.c
+++ b/net/netfilter/nf_conncount.c
@@ -605,15 +605,11 @@ static int __init nf_conncount_modinit(void)
for (i = 0; i < CONNCOUNT_SLOTS; ++i)
spin_lock_init(&nf_conncount_locks[i]);
- conncount_conn_cachep = kmem_cache_create("nf_conncount_tuple",
- sizeof(struct nf_conncount_tuple),
- 0, 0, NULL);
+ conncount_conn_cachep = KMEM_CACHE(nf_conncount_tuple, 0);
if (!conncount_conn_cachep)
return -ENOMEM;
- conncount_rb_cachep = kmem_cache_create("nf_conncount_rb",
- sizeof(struct nf_conncount_rb),
- 0, 0, NULL);
+ conncount_rb_cachep = KMEM_CACHE(nf_conncount_rb, 0);
if (!conncount_rb_cachep) {
kmem_cache_destroy(conncount_conn_cachep);
return -ENOMEM;
diff --git a/net/netfilter/nf_conntrack_bpf.c b/net/netfilter/nf_conntrack_bpf.c
index 475358ec8212..d2492d050fe6 100644
--- a/net/netfilter/nf_conntrack_bpf.c
+++ b/net/netfilter/nf_conntrack_bpf.c
@@ -467,7 +467,7 @@ __bpf_kfunc int bpf_ct_change_status(struct nf_conn *nfct, u32 status)
__bpf_kfunc_end_defs();
-BTF_SET8_START(nf_ct_kfunc_set)
+BTF_KFUNCS_START(nf_ct_kfunc_set)
BTF_ID_FLAGS(func, bpf_xdp_ct_alloc, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_xdp_ct_lookup, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_skb_ct_alloc, KF_ACQUIRE | KF_RET_NULL)
@@ -478,7 +478,7 @@ BTF_ID_FLAGS(func, bpf_ct_set_timeout, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_ct_change_timeout, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_ct_set_status, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_ct_change_status, KF_TRUSTED_ARGS)
-BTF_SET8_END(nf_ct_kfunc_set)
+BTF_KFUNCS_END(nf_ct_kfunc_set)
static const struct btf_kfunc_id_set nf_conntrack_kfunc_set = {
.owner = THIS_MODULE,
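
BTF_KFUNCS_START/END are drop-in replacements for BTF_SET8_START/END that additionally tag the ID set as holding kfuncs, so the BPF verifier can treat its members accordingly. Registration itself is unchanged; a sketch for a hypothetical kfunc bpf_foo():

    BTF_KFUNCS_START(foo_kfunc_set)
    BTF_ID_FLAGS(func, bpf_foo, KF_TRUSTED_ARGS)
    BTF_KFUNCS_END(foo_kfunc_set)

    static const struct btf_kfunc_id_set foo_set = {
            .owner = THIS_MODULE,
            .set   = &foo_kfunc_set,
    };

    /* typically from module init */
    err = register_btf_kfunc_id_set(BPF_PROG_TYPE_XDP, &foo_set);
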
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 5b876fa7f9af..c63868666bd9 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -2530,7 +2530,7 @@ void nf_conntrack_cleanup_net_list(struct list_head *net_exit_list)
* netfilter framework. Roll on, two-stage module
* delete...
*/
- synchronize_net();
+ synchronize_rcu_expedited();
i_see_dead_people:
busy = 0;
list_for_each_entry(net, net_exit_list, exit_list) {
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index 81ca348915c9..21fa550966f0 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -722,9 +722,7 @@ int nf_conntrack_expect_init(void)
nf_ct_expect_hsize = 1;
}
nf_ct_expect_max = nf_ct_expect_hsize * 4;
- nf_ct_expect_cachep = kmem_cache_create("nf_conntrack_expect",
- sizeof(struct nf_conntrack_expect),
- 0, 0, NULL);
+ nf_ct_expect_cachep = KMEM_CACHE(nf_conntrack_expect, 0);
if (!nf_ct_expect_cachep)
return -ENOMEM;
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
index e16f158388bb..370f8231385c 100644
--- a/net/netfilter/nf_log.c
+++ b/net/netfilter/nf_log.c
@@ -31,10 +31,10 @@ static struct nf_logger *__find_logger(int pf, const char *str_logger)
int i;
for (i = 0; i < NF_LOG_TYPE_MAX; i++) {
- if (loggers[pf][i] == NULL)
+ log = nft_log_dereference(loggers[pf][i]);
+ if (!log)
continue;
- log = nft_log_dereference(loggers[pf][i]);
if (!strncasecmp(str_logger, log->name, strlen(log->name)))
return log;
}
@@ -156,6 +156,11 @@ int nf_logger_find_get(int pf, enum nf_log_type type)
struct nf_logger *logger;
int ret = -ENOENT;
+ if (pf >= ARRAY_SIZE(loggers))
+ return -EINVAL;
+ if (type >= NF_LOG_TYPE_MAX)
+ return -EINVAL;
+
if (pf == NFPROTO_INET) {
ret = nf_logger_find_get(NFPROTO_IPV4, type);
if (ret < 0)
diff --git a/net/netfilter/nf_nat_bpf.c b/net/netfilter/nf_nat_bpf.c
index 6e3b2f58855f..481be15609b1 100644
--- a/net/netfilter/nf_nat_bpf.c
+++ b/net/netfilter/nf_nat_bpf.c
@@ -54,9 +54,9 @@ __bpf_kfunc int bpf_ct_set_nat_info(struct nf_conn___init *nfct,
__bpf_kfunc_end_defs();
-BTF_SET8_START(nf_nat_kfunc_set)
+BTF_KFUNCS_START(nf_nat_kfunc_set)
BTF_ID_FLAGS(func, bpf_ct_set_nat_info, KF_TRUSTED_ARGS)
-BTF_SET8_END(nf_nat_kfunc_set)
+BTF_KFUNCS_END(nf_nat_kfunc_set)
static const struct btf_kfunc_id_set nf_bpf_nat_kfunc_set = {
.owner = THIS_MODULE,
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index e2f334f70281..7f12e56e6e52 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -248,109 +248,3 @@ int nf_queue(struct sk_buff *skb, struct nf_hook_state *state,
return 0;
}
EXPORT_SYMBOL_GPL(nf_queue);
-
-static unsigned int nf_iterate(struct sk_buff *skb,
- struct nf_hook_state *state,
- const struct nf_hook_entries *hooks,
- unsigned int *index)
-{
- const struct nf_hook_entry *hook;
- unsigned int verdict, i = *index;
-
- while (i < hooks->num_hook_entries) {
- hook = &hooks->hooks[i];
-repeat:
- verdict = nf_hook_entry_hookfn(hook, skb, state);
- if (verdict != NF_ACCEPT) {
- *index = i;
- if (verdict != NF_REPEAT)
- return verdict;
- goto repeat;
- }
- i++;
- }
-
- *index = i;
- return NF_ACCEPT;
-}
-
-static struct nf_hook_entries *nf_hook_entries_head(const struct net *net, u8 pf, u8 hooknum)
-{
- switch (pf) {
-#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
- case NFPROTO_BRIDGE:
- return rcu_dereference(net->nf.hooks_bridge[hooknum]);
-#endif
- case NFPROTO_IPV4:
- return rcu_dereference(net->nf.hooks_ipv4[hooknum]);
- case NFPROTO_IPV6:
- return rcu_dereference(net->nf.hooks_ipv6[hooknum]);
- default:
- WARN_ON_ONCE(1);
- return NULL;
- }
-
- return NULL;
-}
-
-/* Caller must hold rcu read-side lock */
-void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
-{
- const struct nf_hook_entry *hook_entry;
- const struct nf_hook_entries *hooks;
- struct sk_buff *skb = entry->skb;
- const struct net *net;
- unsigned int i;
- int err;
- u8 pf;
-
- net = entry->state.net;
- pf = entry->state.pf;
-
- hooks = nf_hook_entries_head(net, pf, entry->state.hook);
-
- i = entry->hook_index;
- if (WARN_ON_ONCE(!hooks || i >= hooks->num_hook_entries)) {
- kfree_skb(skb);
- nf_queue_entry_free(entry);
- return;
- }
-
- hook_entry = &hooks->hooks[i];
-
- /* Continue traversal iff userspace said ok... */
- if (verdict == NF_REPEAT)
- verdict = nf_hook_entry_hookfn(hook_entry, skb, &entry->state);
-
- if (verdict == NF_ACCEPT) {
- if (nf_reroute(skb, entry) < 0)
- verdict = NF_DROP;
- }
-
- if (verdict == NF_ACCEPT) {
-next_hook:
- ++i;
- verdict = nf_iterate(skb, &entry->state, hooks, &i);
- }
-
- switch (verdict & NF_VERDICT_MASK) {
- case NF_ACCEPT:
- case NF_STOP:
- local_bh_disable();
- entry->state.okfn(entry->state.net, entry->state.sk, skb);
- local_bh_enable();
- break;
- case NF_QUEUE:
- err = nf_queue(skb, &entry->state, i, verdict);
- if (err == 1)
- goto next_hook;
- break;
- case NF_STOLEN:
- break;
- default:
- kfree_skb(skb);
- }
-
- nf_queue_entry_free(entry);
-}
-EXPORT_SYMBOL(nf_reinject);
diff --git a/net/netfilter/nf_synproxy_core.c b/net/netfilter/nf_synproxy_core.c
index fbbc4fd37349..5b140c12b7df 100644
--- a/net/netfilter/nf_synproxy_core.c
+++ b/net/netfilter/nf_synproxy_core.c
@@ -800,7 +800,7 @@ synproxy_build_ip_ipv6(struct net *net, struct sk_buff *skb,
skb_reset_network_header(skb);
iph = skb_put(skb, sizeof(*iph));
ip6_flow_hdr(iph, 0, 0);
- iph->hop_limit = net->ipv6.devconf_all->hop_limit;
+ iph->hop_limit = READ_ONCE(net->ipv6.devconf_all->hop_limit);
iph->nexthdr = IPPROTO_TCP;
iph->saddr = *saddr;
iph->daddr = *daddr;
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 1683dc196b59..e93f905e60b6 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -1195,8 +1195,10 @@ static void nf_tables_table_disable(struct net *net, struct nft_table *table)
#define __NFT_TABLE_F_INTERNAL (NFT_TABLE_F_MASK + 1)
#define __NFT_TABLE_F_WAS_DORMANT (__NFT_TABLE_F_INTERNAL << 0)
#define __NFT_TABLE_F_WAS_AWAKEN (__NFT_TABLE_F_INTERNAL << 1)
+#define __NFT_TABLE_F_WAS_ORPHAN (__NFT_TABLE_F_INTERNAL << 2)
#define __NFT_TABLE_F_UPDATE (__NFT_TABLE_F_WAS_DORMANT | \
- __NFT_TABLE_F_WAS_AWAKEN)
+ __NFT_TABLE_F_WAS_AWAKEN | \
+ __NFT_TABLE_F_WAS_ORPHAN)
static int nf_tables_updtable(struct nft_ctx *ctx)
{
@@ -1216,8 +1218,11 @@ static int nf_tables_updtable(struct nft_ctx *ctx)
if ((nft_table_has_owner(ctx->table) &&
!(flags & NFT_TABLE_F_OWNER)) ||
- (!nft_table_has_owner(ctx->table) &&
- flags & NFT_TABLE_F_OWNER))
+ (flags & NFT_TABLE_F_OWNER &&
+ !nft_table_is_orphan(ctx->table)))
+ return -EOPNOTSUPP;
+
+ if ((flags ^ ctx->table->flags) & NFT_TABLE_F_PERSIST)
return -EOPNOTSUPP;
/* No dormant off/on/off/on games in single transaction */
@@ -1246,6 +1251,13 @@ static int nf_tables_updtable(struct nft_ctx *ctx)
}
}
+ if ((flags & NFT_TABLE_F_OWNER) &&
+ !nft_table_has_owner(ctx->table)) {
+ ctx->table->nlpid = ctx->portid;
+ ctx->table->flags |= NFT_TABLE_F_OWNER |
+ __NFT_TABLE_F_WAS_ORPHAN;
+ }
+
nft_trans_table_update(trans) = true;
nft_trans_commit_list_add_tail(ctx->net, trans);
@@ -4238,23 +4250,18 @@ static bool nft_set_ops_candidate(const struct nft_set_type *type, u32 flags)
* given, in that case the amount of memory per element is used.
*/
static const struct nft_set_ops *
-nft_select_set_ops(const struct nft_ctx *ctx,
- const struct nlattr * const nla[],
+nft_select_set_ops(const struct nft_ctx *ctx, u32 flags,
const struct nft_set_desc *desc)
{
struct nftables_pernet *nft_net = nft_pernet(ctx->net);
const struct nft_set_ops *ops, *bops;
struct nft_set_estimate est, best;
const struct nft_set_type *type;
- u32 flags = 0;
int i;
lockdep_assert_held(&nft_net->commit_mutex);
lockdep_nfnl_nft_mutex_not_held();
- if (nla[NFTA_SET_FLAGS] != NULL)
- flags = ntohl(nla_get_be32(nla[NFTA_SET_FLAGS]));
-
bops = NULL;
best.size = ~0;
best.lookup = ~0;
@@ -5146,7 +5153,7 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
if (!(info->nlh->nlmsg_flags & NLM_F_CREATE))
return -ENOENT;
- ops = nft_select_set_ops(&ctx, nla, &desc);
+ ops = nft_select_set_ops(&ctx, flags, &desc);
if (IS_ERR(ops))
return PTR_ERR(ops);
@@ -10435,6 +10442,10 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
} else if (trans->ctx.table->flags & __NFT_TABLE_F_WAS_AWAKEN) {
trans->ctx.table->flags &= ~NFT_TABLE_F_DORMANT;
}
+ if (trans->ctx.table->flags & __NFT_TABLE_F_WAS_ORPHAN) {
+ trans->ctx.table->flags &= ~NFT_TABLE_F_OWNER;
+ trans->ctx.table->nlpid = 0;
+ }
trans->ctx.table->flags &= ~__NFT_TABLE_F_UPDATE;
nft_trans_destroy(trans);
} else {
@@ -11361,6 +11372,10 @@ again:
list_for_each_entry(table, &nft_net->tables, list) {
if (nft_table_has_owner(table) &&
n->portid == table->nlpid) {
+ if (table->flags & NFT_TABLE_F_PERSIST) {
+ table->flags &= ~NFT_TABLE_F_OWNER;
+ continue;
+ }
__nft_release_hook(net, table);
list_del_rcu(&table->list);
to_delete[deleted++] = table;
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index 5cf38fc0a366..00f4bd21c59b 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -225,6 +225,148 @@ find_dequeue_entry(struct nfqnl_instance *queue, unsigned int id)
return entry;
}
+static unsigned int nf_iterate(struct sk_buff *skb,
+ struct nf_hook_state *state,
+ const struct nf_hook_entries *hooks,
+ unsigned int *index)
+{
+ const struct nf_hook_entry *hook;
+ unsigned int verdict, i = *index;
+
+ while (i < hooks->num_hook_entries) {
+ hook = &hooks->hooks[i];
+repeat:
+ verdict = nf_hook_entry_hookfn(hook, skb, state);
+ if (verdict != NF_ACCEPT) {
+ *index = i;
+ if (verdict != NF_REPEAT)
+ return verdict;
+ goto repeat;
+ }
+ i++;
+ }
+
+ *index = i;
+ return NF_ACCEPT;
+}
+
+static struct nf_hook_entries *nf_hook_entries_head(const struct net *net, u8 pf, u8 hooknum)
+{
+ switch (pf) {
+#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
+ case NFPROTO_BRIDGE:
+ return rcu_dereference(net->nf.hooks_bridge[hooknum]);
+#endif
+ case NFPROTO_IPV4:
+ return rcu_dereference(net->nf.hooks_ipv4[hooknum]);
+ case NFPROTO_IPV6:
+ return rcu_dereference(net->nf.hooks_ipv6[hooknum]);
+ default:
+ WARN_ON_ONCE(1);
+ return NULL;
+ }
+
+ return NULL;
+}
+
+static int nf_ip_reroute(struct sk_buff *skb, const struct nf_queue_entry *entry)
+{
+#ifdef CONFIG_INET
+ const struct ip_rt_info *rt_info = nf_queue_entry_reroute(entry);
+
+ if (entry->state.hook == NF_INET_LOCAL_OUT) {
+ const struct iphdr *iph = ip_hdr(skb);
+
+ if (!(iph->tos == rt_info->tos &&
+ skb->mark == rt_info->mark &&
+ iph->daddr == rt_info->daddr &&
+ iph->saddr == rt_info->saddr))
+ return ip_route_me_harder(entry->state.net, entry->state.sk,
+ skb, RTN_UNSPEC);
+ }
+#endif
+ return 0;
+}
+
+static int nf_reroute(struct sk_buff *skb, struct nf_queue_entry *entry)
+{
+ const struct nf_ipv6_ops *v6ops;
+ int ret = 0;
+
+ switch (entry->state.pf) {
+ case AF_INET:
+ ret = nf_ip_reroute(skb, entry);
+ break;
+ case AF_INET6:
+ v6ops = rcu_dereference(nf_ipv6_ops);
+ if (v6ops)
+ ret = v6ops->reroute(skb, entry);
+ break;
+ }
+ return ret;
+}
+
+/* caller must hold rcu read-side lock */
+static void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
+{
+ const struct nf_hook_entry *hook_entry;
+ const struct nf_hook_entries *hooks;
+ struct sk_buff *skb = entry->skb;
+ const struct net *net;
+ unsigned int i;
+ int err;
+ u8 pf;
+
+ net = entry->state.net;
+ pf = entry->state.pf;
+
+ hooks = nf_hook_entries_head(net, pf, entry->state.hook);
+
+ i = entry->hook_index;
+ if (WARN_ON_ONCE(!hooks || i >= hooks->num_hook_entries)) {
+ kfree_skb_reason(skb, SKB_DROP_REASON_NETFILTER_DROP);
+ nf_queue_entry_free(entry);
+ return;
+ }
+
+ hook_entry = &hooks->hooks[i];
+
+ /* Continue traversal iff userspace said ok... */
+ if (verdict == NF_REPEAT)
+ verdict = nf_hook_entry_hookfn(hook_entry, skb, &entry->state);
+
+ if (verdict == NF_ACCEPT) {
+ if (nf_reroute(skb, entry) < 0)
+ verdict = NF_DROP;
+ }
+
+ if (verdict == NF_ACCEPT) {
+next_hook:
+ ++i;
+ verdict = nf_iterate(skb, &entry->state, hooks, &i);
+ }
+
+ switch (verdict & NF_VERDICT_MASK) {
+ case NF_ACCEPT:
+ case NF_STOP:
+ local_bh_disable();
+ entry->state.okfn(entry->state.net, entry->state.sk, skb);
+ local_bh_enable();
+ break;
+ case NF_QUEUE:
+ err = nf_queue(skb, &entry->state, i, verdict);
+ if (err == 1)
+ goto next_hook;
+ break;
+ case NF_STOLEN:
+ break;
+ default:
+ kfree_skb(skb);
+ }
+
+ nf_queue_entry_free(entry);
+}
+
static void nfqnl_reinject(struct nf_queue_entry *entry, unsigned int verdict)
{
const struct nf_ct_hook *ct_hook;
diff --git a/net/netfilter/nft_osf.c b/net/netfilter/nft_osf.c
index 7f61506e5b44..7fec57ff736f 100644
--- a/net/netfilter/nft_osf.c
+++ b/net/netfilter/nft_osf.c
@@ -63,7 +63,6 @@ static int nft_osf_init(const struct nft_ctx *ctx,
{
struct nft_osf *priv = nft_expr_priv(expr);
u32 flags;
- int err;
u8 ttl;
if (!tb[NFTA_OSF_DREG])
@@ -83,13 +82,9 @@ static int nft_osf_init(const struct nft_ctx *ctx,
priv->flags = flags;
}
- err = nft_parse_register_store(ctx, tb[NFTA_OSF_DREG], &priv->dreg,
- NULL, NFT_DATA_VALUE,
- NFT_OSF_MAXGENRELEN);
- if (err < 0)
- return err;
-
- return 0;
+ return nft_parse_register_store(ctx, tb[NFTA_OSF_DREG], &priv->dreg,
+ NULL, NFT_DATA_VALUE,
+ NFT_OSF_MAXGENRELEN);
}
static int nft_osf_dump(struct sk_buff *skb,
diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c
index aa1d9e93a9a0..c0ceea068936 100644
--- a/net/netfilter/nft_set_pipapo.c
+++ b/net/netfilter/nft_set_pipapo.c
@@ -359,11 +359,13 @@
*
* Return: -1 on no match, bit position on 'match_only', 0 otherwise.
*/
-int pipapo_refill(unsigned long *map, int len, int rules, unsigned long *dst,
- union nft_pipapo_map_bucket *mt, bool match_only)
+int pipapo_refill(unsigned long *map, unsigned int len, unsigned int rules,
+ unsigned long *dst,
+ const union nft_pipapo_map_bucket *mt, bool match_only)
{
unsigned long bitset;
- int k, ret = -1;
+ unsigned int k;
+ int ret = -1;
for (k = 0; k < len; k++) {
bitset = map[k];
@@ -412,9 +414,9 @@ bool nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
struct nft_pipapo_scratch *scratch;
unsigned long *res_map, *fill_map;
u8 genmask = nft_genmask_cur(net);
+ const struct nft_pipapo_match *m;
+ const struct nft_pipapo_field *f;
const u8 *rp = (const u8 *)key;
- struct nft_pipapo_match *m;
- struct nft_pipapo_field *f;
bool map_index;
int i;
@@ -505,6 +507,7 @@ out:
* @data: Key data to be matched against existing elements
* @genmask: If set, check that element is active in given genmask
* @tstamp: timestamp to check for expired elements
+ * @gfp: the type of memory to allocate (see kmalloc).
*
* This is essentially the same as the lookup function, except that it matches
* key data against the uncommitted copy and doesn't use preallocated maps for
@@ -515,22 +518,26 @@ out:
static struct nft_pipapo_elem *pipapo_get(const struct net *net,
const struct nft_set *set,
const u8 *data, u8 genmask,
- u64 tstamp)
+ u64 tstamp, gfp_t gfp)
{
struct nft_pipapo_elem *ret = ERR_PTR(-ENOENT);
struct nft_pipapo *priv = nft_set_priv(set);
- struct nft_pipapo_match *m = priv->clone;
unsigned long *res_map, *fill_map = NULL;
- struct nft_pipapo_field *f;
+ const struct nft_pipapo_match *m;
+ const struct nft_pipapo_field *f;
int i;
- res_map = kmalloc_array(m->bsize_max, sizeof(*res_map), GFP_ATOMIC);
+ m = priv->clone;
+ if (m->bsize_max == 0)
+ return ret;
+
+ res_map = kmalloc_array(m->bsize_max, sizeof(*res_map), gfp);
if (!res_map) {
ret = ERR_PTR(-ENOMEM);
goto out;
}
- fill_map = kcalloc(m->bsize_max, sizeof(*res_map), GFP_ATOMIC);
+ fill_map = kcalloc(m->bsize_max, sizeof(*res_map), gfp);
if (!fill_map) {
ret = ERR_PTR(-ENOMEM);
goto out;
@@ -608,7 +615,8 @@ nft_pipapo_get(const struct net *net, const struct nft_set *set,
struct nft_pipapo_elem *e;
e = pipapo_get(net, set, (const u8 *)elem->key.val.data,
- nft_genmask_cur(net), get_jiffies_64());
+ nft_genmask_cur(net), get_jiffies_64(),
+ GFP_ATOMIC);
if (IS_ERR(e))
return ERR_CAST(e);
@@ -616,6 +624,65 @@ nft_pipapo_get(const struct net *net, const struct nft_set *set,
}
/**
+ * pipapo_realloc_mt() - Reallocate mapping table if needed upon resize
+ * @f: Field containing mapping table
+ * @old_rules: Amount of existing mapped rules
+ * @rules: Amount of new rules to map
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int pipapo_realloc_mt(struct nft_pipapo_field *f,
+ unsigned int old_rules, unsigned int rules)
+{
+ union nft_pipapo_map_bucket *new_mt = NULL, *old_mt = f->mt;
+ const unsigned int extra = PAGE_SIZE / sizeof(*new_mt);
+ unsigned int rules_alloc = rules;
+
+ might_sleep();
+
+ if (unlikely(rules == 0))
+ goto out_free;
+
+ /* growing and enough space left, no action needed */
+ if (rules > old_rules && f->rules_alloc > rules)
+ return 0;
+
+ /* downsizing, and the extra slack has not grown too large */
+ if (rules < old_rules) {
+ unsigned int remove = f->rules_alloc - rules;
+
+ if (remove < (2u * extra))
+ return 0;
+ }
+
+ /* If the set needs more than one page of memory for rules,
+ * allocate an extra page to avoid frequent reallocations.
+ */
+ if (rules > extra &&
+ check_add_overflow(rules, extra, &rules_alloc))
+ return -EOVERFLOW;
+
+ new_mt = kvmalloc_array(rules_alloc, sizeof(*new_mt), GFP_KERNEL);
+ if (!new_mt)
+ return -ENOMEM;
+
+ if (old_mt)
+ memcpy(new_mt, old_mt, min(old_rules, rules) * sizeof(*new_mt));
+
+ if (rules > old_rules) {
+ memset(new_mt + old_rules, 0,
+ (rules - old_rules) * sizeof(*new_mt));
+ }
+out_free:
+ f->rules_alloc = rules_alloc;
+ f->mt = new_mt;
+
+ kvfree(old_mt);
+
+ return 0;
+}
+
+/**
* pipapo_resize() - Resize lookup or mapping table, or both
* @f: Field containing lookup and mapping tables
* @old_rules: Previous amount of rules in field
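
pipapo_realloc_mt() trades a little memory for fewer reallocations: growth reserves an extra page worth of buckets once a set exceeds one page, and shrinking only reallocates after the slack reaches two pages worth. A userspace model of just the grow/shrink decision (page and bucket sizes are illustrative):

    #include <stdbool.h>

    #define EXTRA (4096 / 16)       /* one page of 16-byte map buckets */

    static bool mt_needs_realloc(unsigned int alloc, unsigned int old_rules,
                                 unsigned int rules)
    {
            if (rules > old_rules)          /* growing */
                    return alloc <= rules;  /* reserved space exhausted */
            if (rules < old_rules)          /* shrinking */
                    return alloc - rules >= 2 * EXTRA;
            return false;
    }

    /* mt_needs_realloc(300, 200, 250) == false: still fits in the slack
     * mt_needs_realloc(600, 500, 50)  == true:  550 idle slots, release */
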
@@ -627,12 +694,15 @@ nft_pipapo_get(const struct net *net, const struct nft_set *set,
*
* Return: 0 on success, -ENOMEM on allocation failure.
*/
-static int pipapo_resize(struct nft_pipapo_field *f, int old_rules, int rules)
+static int pipapo_resize(struct nft_pipapo_field *f,
+ unsigned int old_rules, unsigned int rules)
{
long *new_lt = NULL, *new_p, *old_lt = f->lt, *old_p;
- union nft_pipapo_map_bucket *new_mt, *old_mt = f->mt;
- size_t new_bucket_size, copy;
- int group, bucket;
+ unsigned int new_bucket_size, copy;
+ int group, bucket, err;
+
+ if (rules >= NFT_PIPAPO_RULE0_MAX)
+ return -ENOSPC;
new_bucket_size = DIV_ROUND_UP(rules, BITS_PER_LONG);
#ifdef NFT_PIPAPO_ALIGN
@@ -672,27 +742,18 @@ static int pipapo_resize(struct nft_pipapo_field *f, int old_rules, int rules)
}
mt:
- new_mt = kvmalloc(rules * sizeof(*new_mt), GFP_KERNEL);
- if (!new_mt) {
+ err = pipapo_realloc_mt(f, old_rules, rules);
+ if (err) {
kvfree(new_lt);
- return -ENOMEM;
- }
-
- memcpy(new_mt, f->mt, min(old_rules, rules) * sizeof(*new_mt));
- if (rules > old_rules) {
- memset(new_mt + old_rules, 0,
- (rules - old_rules) * sizeof(*new_mt));
+ return err;
}
if (new_lt) {
f->bsize = new_bucket_size;
- NFT_PIPAPO_LT_ASSIGN(f, new_lt);
+ f->lt = new_lt;
kvfree(old_lt);
}
- f->mt = new_mt;
- kvfree(old_mt);
-
return 0;
}
@@ -843,8 +904,8 @@ static void pipapo_lt_8b_to_4b(int old_groups, int bsize,
*/
static void pipapo_lt_bits_adjust(struct nft_pipapo_field *f)
{
+ unsigned int groups, bb;
unsigned long *new_lt;
- int groups, bb;
size_t lt_size;
lt_size = f->groups * NFT_PIPAPO_BUCKETS(f->bb) * f->bsize *
@@ -894,7 +955,7 @@ static void pipapo_lt_bits_adjust(struct nft_pipapo_field *f)
f->groups = groups;
f->bb = bb;
kvfree(f->lt);
- NFT_PIPAPO_LT_ASSIGN(f, new_lt);
+ f->lt = new_lt;
}
/**
@@ -911,7 +972,7 @@ static void pipapo_lt_bits_adjust(struct nft_pipapo_field *f)
static int pipapo_insert(struct nft_pipapo_field *f, const uint8_t *k,
int mask_bits)
{
- int rule = f->rules, group, ret, bit_offset = 0;
+ unsigned int rule = f->rules, group, ret, bit_offset = 0;
ret = pipapo_resize(f, f->rules, f->rules + 1);
if (ret)
@@ -1216,7 +1277,7 @@ static int nft_pipapo_insert(const struct net *net, const struct nft_set *set,
else
end = start;
- dup = pipapo_get(net, set, start, genmask, tstamp);
+ dup = pipapo_get(net, set, start, genmask, tstamp, GFP_KERNEL);
if (!IS_ERR(dup)) {
/* Check if we already have the same exact entry */
const struct nft_data *dup_key, *dup_end;
@@ -1238,7 +1299,8 @@ static int nft_pipapo_insert(const struct net *net, const struct nft_set *set,
if (PTR_ERR(dup) == -ENOENT) {
/* Look for partially overlapping entries */
- dup = pipapo_get(net, set, end, nft_genmask_next(net), tstamp);
+ dup = pipapo_get(net, set, end, nft_genmask_next(net), tstamp,
+ GFP_KERNEL);
}
if (PTR_ERR(dup) != -ENOENT) {
@@ -1251,8 +1313,14 @@ static int nft_pipapo_insert(const struct net *net, const struct nft_set *set,
/* Validate */
start_p = start;
end_p = end;
+
+ /* some helpers return -1, or >= 0 for a valid rule position,
+ * so we cannot support more than INT_MAX rules at this time.
+ */
+ BUILD_BUG_ON(NFT_PIPAPO_RULE0_MAX > INT_MAX);
+
nft_pipapo_for_each_field(f, i, m) {
- if (f->rules >= (unsigned long)NFT_PIPAPO_RULE0_MAX)
+ if (f->rules >= NFT_PIPAPO_RULE0_MAX)
return -ENOSPC;
if (memcmp(start_p, end_p,
@@ -1358,18 +1426,25 @@ static struct nft_pipapo_match *pipapo_clone(struct nft_pipapo_match *old)
if (!new_lt)
goto out_lt;
- NFT_PIPAPO_LT_ASSIGN(dst, new_lt);
+ dst->lt = new_lt;
memcpy(NFT_PIPAPO_LT_ALIGN(new_lt),
NFT_PIPAPO_LT_ALIGN(src->lt),
src->bsize * sizeof(*dst->lt) *
src->groups * NFT_PIPAPO_BUCKETS(src->bb));
- dst->mt = kvmalloc(src->rules * sizeof(*src->mt), GFP_KERNEL);
- if (!dst->mt)
- goto out_mt;
+ if (src->rules > 0) {
+ dst->mt = kvmalloc_array(src->rules_alloc,
+ sizeof(*src->mt), GFP_KERNEL);
+ if (!dst->mt)
+ goto out_mt;
+
+ memcpy(dst->mt, src->mt, src->rules * sizeof(*src->mt));
+ } else {
+ dst->mt = NULL;
+ dst->rules_alloc = 0;
+ }
- memcpy(dst->mt, src->mt, src->rules * sizeof(*src->mt));
src++;
dst++;
}
@@ -1423,10 +1498,10 @@ out_scratch:
*
* Return: Number of rules that originated from the same entry as @first.
*/
-static int pipapo_rules_same_key(struct nft_pipapo_field *f, int first)
+static unsigned int pipapo_rules_same_key(struct nft_pipapo_field *f, unsigned int first)
{
struct nft_pipapo_elem *e = NULL; /* Keep gcc happy */
- int r;
+ unsigned int r;
for (r = first; r < f->rules; r++) {
if (r != first && e != f->mt[r].e)
@@ -1479,8 +1554,9 @@ static int pipapo_rules_same_key(struct nft_pipapo_field *f, int first)
* 0 1 2
* element pointers: 0x42 0x42 0x44
*/
-static void pipapo_unmap(union nft_pipapo_map_bucket *mt, int rules,
- int start, int n, int to_offset, bool is_last)
+static void pipapo_unmap(union nft_pipapo_map_bucket *mt, unsigned int rules,
+ unsigned int start, unsigned int n,
+ unsigned int to_offset, bool is_last)
{
int i;
@@ -1586,8 +1662,8 @@ static void pipapo_gc(struct nft_set *set, struct nft_pipapo_match *m)
{
struct nft_pipapo *priv = nft_set_priv(set);
struct net *net = read_pnet(&set->net);
+ unsigned int rules_f0, first_rule = 0;
u64 tstamp = nft_net_tstamp(net);
- int rules_f0, first_rule = 0;
struct nft_pipapo_elem *e;
struct nft_trans_gc *gc;
@@ -1597,8 +1673,8 @@ static void pipapo_gc(struct nft_set *set, struct nft_pipapo_match *m)
while ((rules_f0 = pipapo_rules_same_key(m->f, first_rule))) {
union nft_pipapo_map_bucket rulemap[NFT_PIPAPO_MAX_FIELDS];
- struct nft_pipapo_field *f;
- int i, start, rules_fx;
+ const struct nft_pipapo_field *f;
+ unsigned int i, start, rules_fx;
start = first_rule;
rules_fx = rules_f0;
@@ -1792,7 +1868,8 @@ static void *pipapo_deactivate(const struct net *net, const struct nft_set *set,
{
struct nft_pipapo_elem *e;
- e = pipapo_get(net, set, data, nft_genmask_next(net), nft_net_tstamp(net));
+ e = pipapo_get(net, set, data, nft_genmask_next(net),
+ nft_net_tstamp(net), GFP_KERNEL);
if (IS_ERR(e))
return NULL;
@@ -1976,7 +2053,7 @@ static void nft_pipapo_remove(const struct net *net, const struct nft_set *set,
{
struct nft_pipapo *priv = nft_set_priv(set);
struct nft_pipapo_match *m = priv->clone;
- int rules_f0, first_rule = 0;
+ unsigned int rules_f0, first_rule = 0;
struct nft_pipapo_elem *e;
const u8 *data;
@@ -2039,9 +2116,9 @@ static void nft_pipapo_walk(const struct nft_ctx *ctx, struct nft_set *set,
{
struct nft_pipapo *priv = nft_set_priv(set);
struct net *net = read_pnet(&set->net);
- struct nft_pipapo_match *m;
- struct nft_pipapo_field *f;
- int i, r;
+ const struct nft_pipapo_match *m;
+ const struct nft_pipapo_field *f;
+ unsigned int i, r;
rcu_read_lock();
if (iter->genmask == nft_genmask_cur(net))
@@ -2145,6 +2222,9 @@ static int nft_pipapo_init(const struct nft_set *set,
field_count = desc->field_count ? : 1;
+ BUILD_BUG_ON(NFT_PIPAPO_MAX_FIELDS > 255);
+ BUILD_BUG_ON(NFT_PIPAPO_MAX_FIELDS != NFT_REG32_COUNT);
+
if (field_count > NFT_PIPAPO_MAX_FIELDS)
return -EINVAL;
@@ -2166,7 +2246,11 @@ static int nft_pipapo_init(const struct nft_set *set,
rcu_head_init(&m->rcu);
nft_pipapo_for_each_field(f, i, m) {
- int len = desc->field_len[i] ? : set->klen;
+ unsigned int len = desc->field_len[i] ? : set->klen;
+
+ /* f->groups is u8 */
+ BUILD_BUG_ON((NFT_PIPAPO_MAX_BYTES *
+ BITS_PER_BYTE / NFT_PIPAPO_GROUP_BITS_LARGE_SET) >= 256);
f->bb = NFT_PIPAPO_GROUP_BITS_INIT;
f->groups = len * NFT_PIPAPO_GROUPS_PER_BYTE(f);
@@ -2175,7 +2259,8 @@ static int nft_pipapo_init(const struct nft_set *set,
f->bsize = 0;
f->rules = 0;
- NFT_PIPAPO_LT_ASSIGN(f, NULL);
+ f->rules_alloc = 0;
+ f->lt = NULL;
f->mt = NULL;
}
@@ -2211,7 +2296,7 @@ static void nft_set_pipapo_match_destroy(const struct nft_ctx *ctx,
struct nft_pipapo_match *m)
{
struct nft_pipapo_field *f;
- int i, r;
+ unsigned int i, r;
for (i = 0, f = m->f; i < m->field_count - 1; i++, f++)
;
diff --git a/net/netfilter/nft_set_pipapo.h b/net/netfilter/nft_set_pipapo.h
index 3842c7341a9f..24cd1ff73f98 100644
--- a/net/netfilter/nft_set_pipapo.h
+++ b/net/netfilter/nft_set_pipapo.h
@@ -70,15 +70,9 @@
#define NFT_PIPAPO_ALIGN_HEADROOM \
(NFT_PIPAPO_ALIGN - ARCH_KMALLOC_MINALIGN)
#define NFT_PIPAPO_LT_ALIGN(lt) (PTR_ALIGN((lt), NFT_PIPAPO_ALIGN))
-#define NFT_PIPAPO_LT_ASSIGN(field, x) \
- do { \
- (field)->lt_aligned = NFT_PIPAPO_LT_ALIGN(x); \
- (field)->lt = (x); \
- } while (0)
#else
#define NFT_PIPAPO_ALIGN_HEADROOM 0
#define NFT_PIPAPO_LT_ALIGN(lt) (lt)
-#define NFT_PIPAPO_LT_ASSIGN(field, x) ((field)->lt = (x))
#endif /* NFT_PIPAPO_ALIGN */
#define nft_pipapo_for_each_field(field, index, match) \
@@ -110,22 +104,20 @@ union nft_pipapo_map_bucket {
/**
* struct nft_pipapo_field - Lookup, mapping tables and related data for a field
- * @groups: Amount of bit groups
* @rules: Number of inserted rules
* @bsize: Size of each bucket in lookup table, in longs
+ * @rules_alloc: Number of allocated rules, always >= rules
+ * @groups: Amount of bit groups
* @bb: Number of bits grouped together in lookup table buckets
* @lt: Lookup table: 'groups' rows of buckets
- * @lt_aligned: Version of @lt aligned to NFT_PIPAPO_ALIGN bytes
* @mt: Mapping table: one bucket per rule
*/
struct nft_pipapo_field {
- int groups;
- unsigned long rules;
- size_t bsize;
- int bb;
-#ifdef NFT_PIPAPO_ALIGN
- unsigned long *lt_aligned;
-#endif
+ unsigned int rules;
+ unsigned int bsize;
+ unsigned int rules_alloc;
+ u8 groups;
+ u8 bb;
unsigned long *lt;
union nft_pipapo_map_bucket *mt;
};
@@ -145,15 +137,15 @@ struct nft_pipapo_scratch {
/**
* struct nft_pipapo_match - Data used for lookup and matching
* @field_count: Amount of fields in set
- * @scratch: Preallocated per-CPU maps for partial matching results
* @bsize_max: Maximum lookup table bucket size of all fields, in longs
+ * @scratch: Preallocated per-CPU maps for partial matching results
* @rcu: Matching data is swapped on commits
* @f: Fields, with lookup and mapping tables
*/
struct nft_pipapo_match {
- int field_count;
+ u8 field_count;
+ unsigned int bsize_max;
struct nft_pipapo_scratch * __percpu *scratch;
- size_t bsize_max;
struct rcu_head rcu;
struct nft_pipapo_field f[] __counted_by(field_count);
};
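
Narrowing rules/bsize/rules_alloc to unsigned int and groups/bb to u8, plus dropping lt_aligned, removes most of the padding in struct nft_pipapo_field. A host-side check of the layout effect, with pointer members standing in for the kernel ones:

    #include <stdio.h>

    struct old_field { int groups; unsigned long rules; unsigned long bsize;
                       int bb; unsigned long *lt; void *mt; };
    struct new_field { unsigned int rules, bsize, rules_alloc;
                       unsigned char groups, bb;
                       unsigned long *lt; void *mt; };

    int main(void)
    {
            /* on LP64 this typically prints "48 -> 32" */
            printf("%zu -> %zu\n", sizeof(struct old_field),
                   sizeof(struct new_field));
            return 0;
    }
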
@@ -186,8 +178,9 @@ struct nft_pipapo_elem {
struct nft_set_ext ext;
};
-int pipapo_refill(unsigned long *map, int len, int rules, unsigned long *dst,
- union nft_pipapo_map_bucket *mt, bool match_only);
+int pipapo_refill(unsigned long *map, unsigned int len, unsigned int rules,
+ unsigned long *dst,
+ const union nft_pipapo_map_bucket *mt, bool match_only);
/**
* pipapo_and_field_buckets_4bit() - Intersect 4-bit buckets
@@ -195,7 +188,7 @@ int pipapo_refill(unsigned long *map, int len, int rules, unsigned long *dst,
* @dst: Area to store result
* @data: Input data selecting table buckets
*/
-static inline void pipapo_and_field_buckets_4bit(struct nft_pipapo_field *f,
+static inline void pipapo_and_field_buckets_4bit(const struct nft_pipapo_field *f,
unsigned long *dst,
const u8 *data)
{
@@ -223,7 +216,7 @@ static inline void pipapo_and_field_buckets_4bit(struct nft_pipapo_field *f,
* @dst: Area to store result
* @data: Input data selecting table buckets
*/
-static inline void pipapo_and_field_buckets_8bit(struct nft_pipapo_field *f,
+static inline void pipapo_and_field_buckets_8bit(const struct nft_pipapo_field *f,
unsigned long *dst,
const u8 *data)
{
diff --git a/net/netfilter/nft_set_pipapo_avx2.c b/net/netfilter/nft_set_pipapo_avx2.c
index a3a8ddca9918..d08407d589ea 100644
--- a/net/netfilter/nft_set_pipapo_avx2.c
+++ b/net/netfilter/nft_set_pipapo_avx2.c
@@ -212,8 +212,9 @@ static int nft_pipapo_avx2_refill(int offset, unsigned long *map,
* word index to be checked next (i.e. first filled word).
*/
static int nft_pipapo_avx2_lookup_4b_2(unsigned long *map, unsigned long *fill,
- struct nft_pipapo_field *f, int offset,
- const u8 *pkt, bool first, bool last)
+ const struct nft_pipapo_field *f,
+ int offset, const u8 *pkt,
+ bool first, bool last)
{
int i, ret = -1, m256_size = f->bsize / NFT_PIPAPO_LONGS_PER_M256, b;
u8 pg[2] = { pkt[0] >> 4, pkt[0] & 0xf };
@@ -274,8 +275,9 @@ nothing:
* word index to be checked next (i.e. first filled word).
*/
static int nft_pipapo_avx2_lookup_4b_4(unsigned long *map, unsigned long *fill,
- struct nft_pipapo_field *f, int offset,
- const u8 *pkt, bool first, bool last)
+ const struct nft_pipapo_field *f,
+ int offset, const u8 *pkt,
+ bool first, bool last)
{
int i, ret = -1, m256_size = f->bsize / NFT_PIPAPO_LONGS_PER_M256, b;
u8 pg[4] = { pkt[0] >> 4, pkt[0] & 0xf, pkt[1] >> 4, pkt[1] & 0xf };
@@ -350,8 +352,9 @@ nothing:
* word index to be checked next (i.e. first filled word).
*/
static int nft_pipapo_avx2_lookup_4b_8(unsigned long *map, unsigned long *fill,
- struct nft_pipapo_field *f, int offset,
- const u8 *pkt, bool first, bool last)
+ const struct nft_pipapo_field *f,
+ int offset, const u8 *pkt,
+ bool first, bool last)
{
u8 pg[8] = { pkt[0] >> 4, pkt[0] & 0xf, pkt[1] >> 4, pkt[1] & 0xf,
pkt[2] >> 4, pkt[2] & 0xf, pkt[3] >> 4, pkt[3] & 0xf,
@@ -445,8 +448,9 @@ nothing:
* word index to be checked next (i.e. first filled word).
*/
static int nft_pipapo_avx2_lookup_4b_12(unsigned long *map, unsigned long *fill,
- struct nft_pipapo_field *f, int offset,
- const u8 *pkt, bool first, bool last)
+ const struct nft_pipapo_field *f,
+ int offset, const u8 *pkt,
+ bool first, bool last)
{
u8 pg[12] = { pkt[0] >> 4, pkt[0] & 0xf, pkt[1] >> 4, pkt[1] & 0xf,
pkt[2] >> 4, pkt[2] & 0xf, pkt[3] >> 4, pkt[3] & 0xf,
@@ -534,8 +538,9 @@ nothing:
* word index to be checked next (i.e. first filled word).
*/
static int nft_pipapo_avx2_lookup_4b_32(unsigned long *map, unsigned long *fill,
- struct nft_pipapo_field *f, int offset,
- const u8 *pkt, bool first, bool last)
+ const struct nft_pipapo_field *f,
+ int offset, const u8 *pkt,
+ bool first, bool last)
{
u8 pg[32] = { pkt[0] >> 4, pkt[0] & 0xf, pkt[1] >> 4, pkt[1] & 0xf,
pkt[2] >> 4, pkt[2] & 0xf, pkt[3] >> 4, pkt[3] & 0xf,
@@ -669,8 +674,9 @@ nothing:
* word index to be checked next (i.e. first filled word).
*/
static int nft_pipapo_avx2_lookup_8b_1(unsigned long *map, unsigned long *fill,
- struct nft_pipapo_field *f, int offset,
- const u8 *pkt, bool first, bool last)
+ const struct nft_pipapo_field *f,
+ int offset, const u8 *pkt,
+ bool first, bool last)
{
int i, ret = -1, m256_size = f->bsize / NFT_PIPAPO_LONGS_PER_M256, b;
unsigned long *lt = f->lt, bsize = f->bsize;
@@ -726,8 +732,9 @@ nothing:
* word index to be checked next (i.e. first filled word).
*/
static int nft_pipapo_avx2_lookup_8b_2(unsigned long *map, unsigned long *fill,
- struct nft_pipapo_field *f, int offset,
- const u8 *pkt, bool first, bool last)
+ const struct nft_pipapo_field *f,
+ int offset, const u8 *pkt,
+ bool first, bool last)
{
int i, ret = -1, m256_size = f->bsize / NFT_PIPAPO_LONGS_PER_M256, b;
unsigned long *lt = f->lt, bsize = f->bsize;
@@ -790,8 +797,9 @@ nothing:
* word index to be checked next (i.e. first filled word).
*/
static int nft_pipapo_avx2_lookup_8b_4(unsigned long *map, unsigned long *fill,
- struct nft_pipapo_field *f, int offset,
- const u8 *pkt, bool first, bool last)
+ const struct nft_pipapo_field *f,
+ int offset, const u8 *pkt,
+ bool first, bool last)
{
int i, ret = -1, m256_size = f->bsize / NFT_PIPAPO_LONGS_PER_M256, b;
unsigned long *lt = f->lt, bsize = f->bsize;
@@ -865,8 +873,9 @@ nothing:
* word index to be checked next (i.e. first filled word).
*/
static int nft_pipapo_avx2_lookup_8b_6(unsigned long *map, unsigned long *fill,
- struct nft_pipapo_field *f, int offset,
- const u8 *pkt, bool first, bool last)
+ const struct nft_pipapo_field *f,
+ int offset, const u8 *pkt,
+ bool first, bool last)
{
int i, ret = -1, m256_size = f->bsize / NFT_PIPAPO_LONGS_PER_M256, b;
unsigned long *lt = f->lt, bsize = f->bsize;
@@ -950,8 +959,9 @@ nothing:
* word index to be checked next (i.e. first filled word).
*/
static int nft_pipapo_avx2_lookup_8b_16(unsigned long *map, unsigned long *fill,
- struct nft_pipapo_field *f, int offset,
- const u8 *pkt, bool first, bool last)
+ const struct nft_pipapo_field *f,
+ int offset, const u8 *pkt,
+ bool first, bool last)
{
int i, ret = -1, m256_size = f->bsize / NFT_PIPAPO_LONGS_PER_M256, b;
unsigned long *lt = f->lt, bsize = f->bsize;
@@ -1042,8 +1052,9 @@ nothing:
* word index to be checked next (i.e. first filled word).
*/
static int nft_pipapo_avx2_lookup_slow(unsigned long *map, unsigned long *fill,
- struct nft_pipapo_field *f, int offset,
- const u8 *pkt, bool first, bool last)
+ const struct nft_pipapo_field *f,
+ int offset, const u8 *pkt,
+ bool first, bool last)
{
unsigned long bsize = f->bsize;
int i, ret = -1, b;
@@ -1119,9 +1130,9 @@ bool nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set,
struct nft_pipapo *priv = nft_set_priv(set);
struct nft_pipapo_scratch *scratch;
u8 genmask = nft_genmask_cur(net);
+ const struct nft_pipapo_match *m;
+ const struct nft_pipapo_field *f;
const u8 *rp = (const u8 *)key;
- struct nft_pipapo_match *m;
- struct nft_pipapo_field *f;
unsigned long *res, *fill;
bool map_index;
int i, ret = 0;
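
The nft_pipapo hunks above are mechanical constification: every AVX2 per-field
lookup helper now takes a const struct nft_pipapo_field *, and the top-level
lookup holds const match/field pointers, so the compiler verifies that the
packet-matching fast path never writes to lookup state shared across CPUs.
A minimal sketch of the pattern (illustrative types, not the kernel's):

    struct field {
            const unsigned long *lt;   /* lookup table, read-only here */
            unsigned int bsize;
    };

    static int lookup_one(const struct field *f, unsigned int bucket)
    {
            /* Reading through f is fine; any store through f is now a
             * compile-time error instead of a latent data race.
             */
            return f->lt[bucket % f->bsize] != 0;
    }
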
diff --git a/net/netfilter/utils.c b/net/netfilter/utils.c
index acef4155f0da..008419db815a 100644
--- a/net/netfilter/utils.c
+++ b/net/netfilter/utils.c
@@ -179,43 +179,6 @@ int nf_route(struct net *net, struct dst_entry **dst, struct flowi *fl,
}
EXPORT_SYMBOL_GPL(nf_route);
-static int nf_ip_reroute(struct sk_buff *skb, const struct nf_queue_entry *entry)
-{
-#ifdef CONFIG_INET
- const struct ip_rt_info *rt_info = nf_queue_entry_reroute(entry);
-
- if (entry->state.hook == NF_INET_LOCAL_OUT) {
- const struct iphdr *iph = ip_hdr(skb);
-
- if (!(iph->tos == rt_info->tos &&
- skb->mark == rt_info->mark &&
- iph->daddr == rt_info->daddr &&
- iph->saddr == rt_info->saddr))
- return ip_route_me_harder(entry->state.net, entry->state.sk,
- skb, RTN_UNSPEC);
- }
-#endif
- return 0;
-}
-
-int nf_reroute(struct sk_buff *skb, struct nf_queue_entry *entry)
-{
- const struct nf_ipv6_ops *v6ops;
- int ret = 0;
-
- switch (entry->state.pf) {
- case AF_INET:
- ret = nf_ip_reroute(skb, entry);
- break;
- case AF_INET6:
- v6ops = rcu_dereference(nf_ipv6_ops);
- if (v6ops)
- ret = v6ops->reroute(skb, entry);
- break;
- }
- return ret;
-}
-
/* Only get and check the lengths, not do any hop-by-hop stuff. */
int nf_ip6_check_hbh_len(struct sk_buff *skb, u32 *plen)
{
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 21624d68314f..da5d929c7c85 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -1142,7 +1142,8 @@ void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
if (target->compat_from_user)
target->compat_from_user(t->data, ct->data);
else
- memcpy(t->data, ct->data, tsize - sizeof(*ct));
+ unsafe_memcpy(t->data, ct->data, tsize - sizeof(*ct),
+ /* UAPI 0-sized destination */);
tsize += off;
t->u.user.target_size = tsize;
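
The x_tables change swaps a plain memcpy() for unsafe_memcpy() when copying
compat target data. The UAPI struct ends in a zero-sized array, so
FORTIFY_SOURCE computes a destination size of 0 and would flag the copy;
unsafe_memcpy() keeps the behavior and records the justification as its
fourth argument. A hedged sketch of the idiom (illustrative struct layout):

    #include <linux/string.h>   /* pulls in fortify-string.h */

    struct uapi_target {                /* UAPI-style layout */
            unsigned short size;
            unsigned char data[0];      /* FORTIFY sees size 0 */
    };

    static void copy_target(struct uapi_target *t, const void *src,
                            size_t len)
    {
            /* memcpy(t->data, src, len) would trip FORTIFY here. */
            unsafe_memcpy(t->data, src, len,
                          /* UAPI 0-sized destination */);
    }
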
diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c
index 27511c90a26f..1ba4f58e1d35 100644
--- a/net/netlabel/netlabel_kapi.c
+++ b/net/netlabel/netlabel_kapi.c
@@ -610,7 +610,7 @@ int netlbl_catmap_walk(struct netlbl_lsm_catmap *catmap, u32 offset)
struct netlbl_lsm_catmap *iter;
u32 idx;
u32 bit;
- NETLBL_CATMAP_MAPTYPE bitmap;
+ u64 bitmap;
iter = _netlbl_catmap_getnode(&catmap, offset, _CM_F_WALK, 0);
if (iter == NULL)
@@ -666,8 +666,8 @@ int netlbl_catmap_walkrng(struct netlbl_lsm_catmap *catmap, u32 offset)
struct netlbl_lsm_catmap *prev = NULL;
u32 idx;
u32 bit;
- NETLBL_CATMAP_MAPTYPE bitmask;
- NETLBL_CATMAP_MAPTYPE bitmap;
+ u64 bitmask;
+ u64 bitmap;
iter = _netlbl_catmap_getnode(&catmap, offset, _CM_F_WALK, 0);
if (iter == NULL)
@@ -857,7 +857,7 @@ int netlbl_catmap_setlong(struct netlbl_lsm_catmap **catmap,
offset -= iter->startbit;
idx = offset / NETLBL_CATMAP_MAPSIZE;
- iter->bitmap[idx] |= (NETLBL_CATMAP_MAPTYPE)bitmap
+ iter->bitmap[idx] |= (u64)bitmap
<< (offset % NETLBL_CATMAP_MAPSIZE);
return 0;
@@ -876,7 +876,7 @@ int netlbl_catmap_setlong(struct netlbl_lsm_catmap **catmap,
* Description:
* Starting at @offset, walk the bitmap from left to right until either the
* desired bit is found or we reach the end. Return the bit offset, -1 if
- * not found, or -2 if error.
+ * not found.
*/
int netlbl_bitmap_walk(const unsigned char *bitmap, u32 bitmap_len,
u32 offset, u8 state)
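
NETLBL_CATMAP_MAPTYPE has always been a 64-bit word, so spelling out u64
removes an indirection without changing the layout. The setlong path locates
the word and bit position from a category offset; roughly modeled below
(userspace sketch, assuming a 64-bit map word and a NETLBL_CATMAP_MAPSIZE of
64 bits):

    #include <stdint.h>

    #define MAPSIZE 64   /* bits per map word */

    /* Set a run of category bits starting at 'offset' in a node whose
     * first bit is 'startbit'.
     */
    static void catmap_setlong(uint64_t *map, uint32_t startbit,
                               uint32_t offset, unsigned long bits)
    {
            uint32_t idx;

            offset -= startbit;
            idx = offset / MAPSIZE;
            map[idx] |= (uint64_t)bits << (offset % MAPSIZE);
    }
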
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index ff315351269f..7554803218a2 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -130,7 +130,7 @@ static const char *const nlk_cb_mutex_key_strings[MAX_LINKS + 1] = {
"nlk_cb_mutex-MAX_LINKS"
};
-static int netlink_dump(struct sock *sk);
+static int netlink_dump(struct sock *sk, bool lock_taken);
/* nl_table locking explained:
* Lookup and traversal are protected with an RCU read-side lock. Insertion
@@ -636,7 +636,7 @@ static struct proto netlink_proto = {
};
static int __netlink_create(struct net *net, struct socket *sock,
- struct mutex *cb_mutex, int protocol,
+ struct mutex *dump_cb_mutex, int protocol,
int kern)
{
struct sock *sk;
@@ -651,15 +651,11 @@ static int __netlink_create(struct net *net, struct socket *sock,
sock_init_data(sock, sk);
nlk = nlk_sk(sk);
- if (cb_mutex) {
- nlk->cb_mutex = cb_mutex;
- } else {
- nlk->cb_mutex = &nlk->cb_def_mutex;
- mutex_init(nlk->cb_mutex);
- lockdep_set_class_and_name(nlk->cb_mutex,
+ mutex_init(&nlk->nl_cb_mutex);
+ lockdep_set_class_and_name(&nlk->nl_cb_mutex,
nlk_cb_mutex_keys + protocol,
nlk_cb_mutex_key_strings[protocol]);
- }
+ nlk->dump_cb_mutex = dump_cb_mutex;
init_waitqueue_head(&nlk->wait);
sk->sk_destruct = netlink_sock_destruct;
@@ -1206,23 +1202,21 @@ struct sock *netlink_getsockbyfilp(struct file *filp)
struct sk_buff *netlink_alloc_large_skb(unsigned int size, int broadcast)
{
+ size_t head_size = SKB_HEAD_ALIGN(size);
struct sk_buff *skb;
void *data;
- if (size <= NLMSG_GOODSIZE || broadcast)
+ if (head_size <= PAGE_SIZE || broadcast)
return alloc_skb(size, GFP_KERNEL);
- size = SKB_DATA_ALIGN(size) +
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
-
- data = vmalloc(size);
- if (data == NULL)
+ data = kvmalloc(head_size, GFP_KERNEL);
+ if (!data)
return NULL;
- skb = __build_skb(data, size);
- if (skb == NULL)
- vfree(data);
- else
+ skb = __build_skb(data, head_size);
+ if (!skb)
+ kvfree(data);
+ else if (is_vmalloc_addr(data))
skb->destructor = netlink_skb_destructor;
return skb;
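
Reassembled from the hunk above, the allocator now computes the real head
size up front with SKB_HEAD_ALIGN() and lets kvmalloc() choose between
kmalloc and vmalloc; the special destructor is only installed when the memory
actually came from vmalloc:

    static struct sk_buff *netlink_alloc_large_skb(unsigned int size,
                                                   int broadcast)
    {
            size_t head_size = SKB_HEAD_ALIGN(size);
            struct sk_buff *skb;
            void *data;

            if (head_size <= PAGE_SIZE || broadcast)
                    return alloc_skb(size, GFP_KERNEL);

            data = kvmalloc(head_size, GFP_KERNEL);
            if (!data)
                    return NULL;

            skb = __build_skb(data, head_size);
            if (!skb)
                    kvfree(data);        /* matches kvmalloc() */
            else if (is_vmalloc_addr(data))
                    skb->destructor = netlink_skb_destructor;
            return skb;
    }
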
@@ -1779,6 +1773,9 @@ static int netlink_getsockopt(struct socket *sock, int level, int optname,
netlink_unlock_table();
return err;
}
+ case NETLINK_LISTEN_ALL_NSID:
+ flag = NETLINK_F_LISTEN_ALL_NSID;
+ break;
case NETLINK_CAP_ACK:
flag = NETLINK_F_CAP_ACK;
break;
@@ -1987,7 +1984,7 @@ static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
if (READ_ONCE(nlk->cb_running) &&
atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
- ret = netlink_dump(sk);
+ ret = netlink_dump(sk, false);
if (ret) {
WRITE_ONCE(sk->sk_err, -ret);
sk_error_report(sk);
@@ -2196,7 +2193,7 @@ static int netlink_dump_done(struct netlink_sock *nlk, struct sk_buff *skb,
return 0;
}
-static int netlink_dump(struct sock *sk)
+static int netlink_dump(struct sock *sk, bool lock_taken)
{
struct netlink_sock *nlk = nlk_sk(sk);
struct netlink_ext_ack extack = {};
@@ -2208,7 +2205,8 @@ static int netlink_dump(struct sock *sk)
int alloc_min_size;
int alloc_size;
- mutex_lock(nlk->cb_mutex);
+ if (!lock_taken)
+ mutex_lock(&nlk->nl_cb_mutex);
if (!nlk->cb_running) {
err = -EINVAL;
goto errout_skb;
@@ -2260,14 +2258,33 @@ static int netlink_dump(struct sock *sk)
netlink_skb_set_owner_r(skb, sk);
if (nlk->dump_done_errno > 0) {
+ struct mutex *extra_mutex = nlk->dump_cb_mutex;
+
cb->extack = &extack;
+
+ if (cb->flags & RTNL_FLAG_DUMP_UNLOCKED)
+ extra_mutex = NULL;
+ if (extra_mutex)
+ mutex_lock(extra_mutex);
nlk->dump_done_errno = cb->dump(skb, cb);
+ if (extra_mutex)
+ mutex_unlock(extra_mutex);
+
+ /* EMSGSIZE plus something already in the skb means
+ * that there's more to dump but the current skb has filled up.
+ * If the callback really wants to return EMSGSIZE to user space
+ * it needs to do so again, on the next cb->dump() call,
+ * without putting data in the skb.
+ */
+ if (nlk->dump_done_errno == -EMSGSIZE && skb->len)
+ nlk->dump_done_errno = skb->len;
+
cb->extack = NULL;
}
if (nlk->dump_done_errno > 0 ||
skb_tailroom(skb) < nlmsg_total_size(sizeof(nlk->dump_done_errno))) {
- mutex_unlock(nlk->cb_mutex);
+ mutex_unlock(&nlk->nl_cb_mutex);
if (sk_filter(sk, skb))
kfree_skb(skb);
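
The new -EMSGSIZE rule changes the contract for dump callbacks: returning
-EMSGSIZE with data already in the skb now means "buffer full, call me
again", while -EMSGSIZE on an empty skb still reaches user space as an error.
A callback can therefore just propagate its fill error; a sketch
(foo_fill_one() is an assumed helper, not a real API):

    static int foo_dump(struct sk_buff *skb, struct netlink_callback *cb)
    {
            long *pos = &cb->args[0];
            int err = 0;

            for (; *pos < 1024; (*pos)++) {
                    err = foo_fill_one(skb, cb, *pos);
                    if (err)        /* typically -EMSGSIZE: skb full */
                            break;
            }
            return err;             /* 0 once iteration completes */
    }
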
@@ -2301,13 +2318,13 @@ static int netlink_dump(struct sock *sk)
WRITE_ONCE(nlk->cb_running, false);
module = cb->module;
skb = cb->skb;
- mutex_unlock(nlk->cb_mutex);
+ mutex_unlock(&nlk->nl_cb_mutex);
module_put(module);
consume_skb(skb);
return 0;
errout_skb:
- mutex_unlock(nlk->cb_mutex);
+ mutex_unlock(&nlk->nl_cb_mutex);
kfree_skb(skb);
return err;
}
@@ -2330,7 +2347,7 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
}
nlk = nlk_sk(sk);
- mutex_lock(nlk->cb_mutex);
+ mutex_lock(&nlk->nl_cb_mutex);
/* A dump is in progress... */
if (nlk->cb_running) {
ret = -EBUSY;
@@ -2350,6 +2367,7 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
cb->data = control->data;
cb->module = control->module;
cb->min_dump_alloc = control->min_dump_alloc;
+ cb->flags = control->flags;
cb->skb = skb;
cb->strict_check = nlk_test_bit(STRICT_CHK, NETLINK_CB(skb).sk);
@@ -2365,9 +2383,7 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
WRITE_ONCE(nlk->cb_running, true);
nlk->dump_done_errno = INT_MAX;
- mutex_unlock(nlk->cb_mutex);
-
- ret = netlink_dump(sk);
+ ret = netlink_dump(sk, true);
sock_put(sk);
@@ -2383,7 +2399,7 @@ error_put:
module_put(control->module);
error_unlock:
sock_put(sk);
- mutex_unlock(nlk->cb_mutex);
+ mutex_unlock(&nlk->nl_cb_mutex);
error_free:
kfree_skb(skb);
return ret;
diff --git a/net/netlink/af_netlink.h b/net/netlink/af_netlink.h
index 2145979b9986..9751e29d4bbb 100644
--- a/net/netlink/af_netlink.h
+++ b/net/netlink/af_netlink.h
@@ -39,8 +39,9 @@ struct netlink_sock {
bool cb_running;
int dump_done_errno;
struct netlink_callback cb;
- struct mutex *cb_mutex;
- struct mutex cb_def_mutex;
+ struct mutex nl_cb_mutex;
+
+ struct mutex *dump_cb_mutex;
void (*netlink_rcv)(struct sk_buff *skb);
int (*netlink_bind)(struct net *net, int group);
void (*netlink_unbind)(struct net *net, int group);
diff --git a/net/netlink/diag.c b/net/netlink/diag.c
index 1eeff9422856..61981e01fd6f 100644
--- a/net/netlink/diag.c
+++ b/net/netlink/diag.c
@@ -207,7 +207,7 @@ static int netlink_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
err = __netlink_diag_dump(skb, cb, req->sdiag_protocol, s_num);
}
- return err < 0 ? err : skb->len;
+ return err <= 0 ? err : skb->len;
}
static int netlink_diag_dump_done(struct netlink_callback *cb)
@@ -241,6 +241,7 @@ static int netlink_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
}
static const struct sock_diag_handler netlink_diag_handler = {
+ .owner = THIS_MODULE,
.family = AF_NETLINK,
.dump = netlink_diag_handler_dump,
};
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 8c7af02f8454..3b7666944b11 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -1232,7 +1232,7 @@ static int ctrl_fill_info(const struct genl_family *family, u32 portid, u32 seq,
hdr = genlmsg_put(skb, portid, seq, &genl_ctrl, flags, cmd);
if (hdr == NULL)
- return -1;
+ return -EMSGSIZE;
if (nla_put_string(skb, CTRL_ATTR_FAMILY_NAME, family->name) ||
nla_put_u16(skb, CTRL_ATTR_FAMILY_ID, family->id) ||
@@ -1355,6 +1355,7 @@ static int ctrl_dumpfamily(struct sk_buff *skb, struct netlink_callback *cb)
struct net *net = sock_net(skb->sk);
int fams_to_skip = cb->args[0];
unsigned int id;
+ int err = 0;
idr_for_each_entry(&genl_fam_idr, rt, id) {
if (!rt->netnsok && !net_eq(net, &init_net))
@@ -1363,16 +1364,17 @@ static int ctrl_dumpfamily(struct sk_buff *skb, struct netlink_callback *cb)
if (n++ < fams_to_skip)
continue;
- if (ctrl_fill_info(rt, NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq, NLM_F_MULTI,
- skb, CTRL_CMD_NEWFAMILY) < 0) {
+ err = ctrl_fill_info(rt, NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, NLM_F_MULTI,
+ skb, CTRL_CMD_NEWFAMILY);
+ if (err) {
n--;
break;
}
}
cb->args[0] = n;
- return skb->len;
+ return err;
}
static struct sk_buff *ctrl_build_family_msg(const struct genl_family *family,
@@ -1836,6 +1838,9 @@ static int genl_bind(struct net *net, int group)
!ns_capable(net->user_ns, CAP_SYS_ADMIN))
ret = -EPERM;
+ if (family->bind)
+ family->bind(i);
+
break;
}
@@ -1843,12 +1848,39 @@ static int genl_bind(struct net *net, int group)
return ret;
}
+static void genl_unbind(struct net *net, int group)
+{
+ const struct genl_family *family;
+ unsigned int id;
+
+ down_read(&cb_lock);
+
+ idr_for_each_entry(&genl_fam_idr, family, id) {
+ int i;
+
+ if (family->n_mcgrps == 0)
+ continue;
+
+ i = group - family->mcgrp_offset;
+ if (i < 0 || i >= family->n_mcgrps)
+ continue;
+
+ if (family->unbind)
+ family->unbind(i);
+
+ break;
+ }
+
+ up_read(&cb_lock);
+}
+
static int __net_init genl_pernet_init(struct net *net)
{
struct netlink_kernel_cfg cfg = {
.input = genl_rcv,
.flags = NL_CFG_F_NONROOT_RECV,
.bind = genl_bind,
+ .unbind = genl_unbind,
.release = genl_release,
};
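
With the new hooks a generic netlink family is told when a socket joins or
leaves one of its multicast groups, e.g. to gate expensive event generation.
A sketch of wiring them up; the callback signatures are assumed from how
genl_bind()/genl_unbind() invoke them above (a plain group index, relative to
the family's mcgrp_offset):

    static void foo_bind(int mcgrp)
    {
            if (mcgrp == 0)
                    pr_debug("foo: a socket joined events group\n");
    }

    static void foo_unbind(int mcgrp)
    {
            if (mcgrp == 0)
                    pr_debug("foo: a socket left events group\n");
    }

    static struct genl_family foo_family = {     /* hypothetical */
            .name    = "foo",
            .version = 1,
            .bind    = foo_bind,
            .unbind  = foo_unbind,
            /* ops, mcgrps, module, etc. omitted */
    };
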
diff --git a/net/nfc/core.c b/net/nfc/core.c
index eb2c0959e5b6..e58dc6405054 100644
--- a/net/nfc/core.c
+++ b/net/nfc/core.c
@@ -1015,7 +1015,7 @@ static void nfc_check_pres_timeout(struct timer_list *t)
schedule_work(&dev->check_pres_work);
}
-struct class nfc_class = {
+const struct class nfc_class = {
.name = "nfc",
.dev_release = nfc_release,
};
diff --git a/net/nfc/hci/llc.c b/net/nfc/hci/llc.c
index 2140f6724644..ba91284f4086 100644
--- a/net/nfc/hci/llc.c
+++ b/net/nfc/hci/llc.c
@@ -30,15 +30,19 @@ exit:
return r;
}
+static void nfc_llc_del_engine(struct nfc_llc_engine *llc_engine)
+{
+ list_del(&llc_engine->entry);
+ kfree_const(llc_engine->name);
+ kfree(llc_engine);
+}
+
void nfc_llc_exit(void)
{
struct nfc_llc_engine *llc_engine, *n;
- list_for_each_entry_safe(llc_engine, n, &llc_engines, entry) {
- list_del(&llc_engine->entry);
- kfree(llc_engine->name);
- kfree(llc_engine);
- }
+ list_for_each_entry_safe(llc_engine, n, &llc_engines, entry)
+ nfc_llc_del_engine(llc_engine);
}
int nfc_llc_register(const char *name, const struct nfc_llc_ops *ops)
@@ -49,7 +53,7 @@ int nfc_llc_register(const char *name, const struct nfc_llc_ops *ops)
if (llc_engine == NULL)
return -ENOMEM;
- llc_engine->name = kstrdup(name, GFP_KERNEL);
+ llc_engine->name = kstrdup_const(name, GFP_KERNEL);
if (llc_engine->name == NULL) {
kfree(llc_engine);
return -ENOMEM;
@@ -82,9 +86,7 @@ void nfc_llc_unregister(const char *name)
if (llc_engine == NULL)
return;
- list_del(&llc_engine->entry);
- kfree(llc_engine->name);
- kfree(llc_engine);
+ nfc_llc_del_engine(llc_engine);
}
struct nfc_llc *nfc_llc_allocate(const char *name, struct nfc_hci_dev *hdev,
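
The llc engine name is typically a string literal, so kstrdup_const() can
avoid the copy and keep the .rodata pointer; the paired kfree_const() only
frees when a real duplicate was made. Factoring the teardown into
nfc_llc_del_engine() keeps both free paths identical. The pattern in
isolation:

    #include <linux/slab.h>
    #include <linux/string.h>

    struct engine {
            const char *name;
    };

    static struct engine *engine_create(const char *name)
    {
            struct engine *e = kzalloc(sizeof(*e), GFP_KERNEL);

            if (!e)
                    return NULL;
            /* Returns 'name' itself if it lives in .rodata. */
            e->name = kstrdup_const(name, GFP_KERNEL);
            if (!e->name) {
                    kfree(e);
                    return NULL;
            }
            return e;
    }

    static void engine_destroy(struct engine *e)
    {
            kfree_const(e->name);   /* no-op for .rodata pointers */
            kfree(e);
    }
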
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index c9bbc2686690..61270826b9ac 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2057,7 +2057,7 @@ retry:
skb->priority = READ_ONCE(sk->sk_priority);
skb->mark = READ_ONCE(sk->sk_mark);
skb->tstamp = sockc.transmit_time;
-
+ skb->mono_delivery_time = !!skb->tstamp;
skb_setup_tx_timestamp(skb, sockc.tsflags);
if (unlikely(extra_len == 4))
@@ -2318,7 +2318,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
}
if (po->tp_version <= TPACKET_V2) {
if (macoff + snaplen > po->rx_ring.frame_size) {
- if (po->copy_thresh &&
+ if (READ_ONCE(po->copy_thresh) &&
atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
if (skb_shared(skb)) {
copy_skb = skb_clone(skb, GFP_ATOMIC);
@@ -2586,6 +2586,7 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
skb->priority = READ_ONCE(po->sk.sk_priority);
skb->mark = READ_ONCE(po->sk.sk_mark);
skb->tstamp = sockc->transmit_time;
+ skb->mono_delivery_time = !!skb->tstamp;
skb_setup_tx_timestamp(skb, sockc->tsflags);
skb_zcopy_set_nouarg(skb, ph.raw);
@@ -3064,6 +3065,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
skb->priority = READ_ONCE(sk->sk_priority);
skb->mark = sockc.mark;
skb->tstamp = sockc.transmit_time;
+ skb->mono_delivery_time = !!skb->tstamp;
if (unlikely(extra_len == 4))
skb->no_fcs = 1;
@@ -3834,7 +3836,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval,
if (copy_from_sockptr(&val, optval, sizeof(val)))
return -EFAULT;
- pkt_sk(sk)->copy_thresh = val;
+ WRITE_ONCE(pkt_sk(sk)->copy_thresh, val);
return 0;
}
case PACKET_VERSION:
@@ -4088,6 +4090,9 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
case PACKET_VNET_HDR_SZ:
val = READ_ONCE(po->vnet_hdr_sz);
break;
+ case PACKET_COPY_THRESH:
+ val = READ_ONCE(pkt_sk(sk)->copy_thresh);
+ break;
case PACKET_VERSION:
val = po->tp_version;
break;
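
copy_thresh is written from setsockopt and read from the receive path without
a common lock, so every store is paired with WRITE_ONCE() and every load with
READ_ONCE(); that rules out load/store tearing and documents the lockless
access for KCSAN. The idiom, reduced to its core (illustrative struct):

    struct cfg {
            int copy_thresh;
    };

    static void cfg_set(struct cfg *c, int val)
    {
            WRITE_ONCE(c->copy_thresh, val);    /* setsockopt side */
    }

    static int cfg_get(const struct cfg *c)
    {
            return READ_ONCE(c->copy_thresh);   /* datapath side */
    }
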
diff --git a/net/packet/diag.c b/net/packet/diag.c
index 9a7980e3309d..47f69f3dbf73 100644
--- a/net/packet/diag.c
+++ b/net/packet/diag.c
@@ -17,7 +17,7 @@ static int pdiag_put_info(const struct packet_sock *po, struct sk_buff *nlskb)
pinfo.pdi_index = po->ifindex;
pinfo.pdi_version = po->tp_version;
pinfo.pdi_reserve = po->tp_reserve;
- pinfo.pdi_copy_thresh = po->copy_thresh;
+ pinfo.pdi_copy_thresh = READ_ONCE(po->copy_thresh);
pinfo.pdi_tstamp = READ_ONCE(po->tp_tstamp);
pinfo.pdi_flags = 0;
@@ -245,6 +245,7 @@ static int packet_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
}
static const struct sock_diag_handler packet_diag_handler = {
+ .owner = THIS_MODULE,
.family = AF_PACKET,
.dump = packet_diag_handler_dump,
};
diff --git a/net/rds/connection.c b/net/rds/connection.c
index b4cc699c5fad..c749c5525b40 100644
--- a/net/rds/connection.c
+++ b/net/rds/connection.c
@@ -829,9 +829,7 @@ int rds_conn_init(void)
if (ret)
return ret;
- rds_conn_slab = kmem_cache_create("rds_connection",
- sizeof(struct rds_connection),
- 0, 0, NULL);
+ rds_conn_slab = KMEM_CACHE(rds_connection, 0);
if (!rds_conn_slab) {
rds_loop_net_exit();
return -ENOMEM;
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index 465bfe5eb061..5222bc97d192 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -487,7 +487,7 @@ EXPORT_SYMBOL(rxrpc_kernel_new_call_notification);
* rxrpc_kernel_set_max_life - Set maximum lifespan on a call
* @sock: The socket the call is on
* @call: The call to configure
- * @hard_timeout: The maximum lifespan of the call in jiffies
+ * @hard_timeout: The maximum lifespan of the call in ms
*
* Set the maximum lifespan of a call. The call will end with ETIME or
* ETIMEDOUT if it takes longer than this.
@@ -495,14 +495,14 @@ EXPORT_SYMBOL(rxrpc_kernel_new_call_notification);
void rxrpc_kernel_set_max_life(struct socket *sock, struct rxrpc_call *call,
unsigned long hard_timeout)
{
- unsigned long now;
+ ktime_t delay = ms_to_ktime(hard_timeout), expect_term_by;
mutex_lock(&call->user_mutex);
- now = jiffies;
- hard_timeout += now;
- WRITE_ONCE(call->expect_term_by, hard_timeout);
- rxrpc_reduce_call_timer(call, hard_timeout, now, rxrpc_timer_set_for_hard);
+ expect_term_by = ktime_add(ktime_get_real(), delay);
+ WRITE_ONCE(call->expect_term_by, expect_term_by);
+ trace_rxrpc_timer_set(call, delay, rxrpc_timer_trace_hard);
+ rxrpc_poke_call(call, rxrpc_call_poke_set_timeout);
mutex_unlock(&call->user_mutex);
}
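
This is part of the rxrpc move from jiffies to ktime: @hard_timeout is now in
milliseconds (hence the kernel-doc fix), the expiry becomes an absolute ktime
deadline, and the I/O thread is poked to pick it up instead of nudging a
jiffies timer directly. The deadline computation in isolation:

    #include <linux/ktime.h>

    /* ktime_t is a signed 64-bit nanosecond count, so deadlines can be
     * compared and subtracted directly, with no jiffies wrap helpers.
     */
    static ktime_t deadline_from_ms(unsigned long timeout_ms)
    {
            return ktime_add(ktime_get_real(), ms_to_ktime(timeout_ms));
    }
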
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index 7818aae1be8e..08c0a32db8c7 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -198,8 +198,8 @@ struct rxrpc_host_header {
* - max 48 bytes (struct sk_buff::cb)
*/
struct rxrpc_skb_priv {
- struct rxrpc_connection *conn; /* Connection referred to (poke packet) */
union {
+ struct rxrpc_connection *conn; /* Connection referred to (poke packet) */
struct {
u16 offset; /* Offset of data */
u16 len; /* Length of data */
@@ -208,9 +208,12 @@ struct rxrpc_skb_priv {
};
struct {
rxrpc_seq_t first_ack; /* First packet in acks table */
+ rxrpc_seq_t prev_ack; /* Highest seq seen */
+ rxrpc_serial_t acked_serial; /* Packet in response to (or 0) */
+ u8 reason; /* Reason for ack */
u8 nr_acks; /* Number of acks+nacks */
u8 nr_nacks; /* Number of nacks */
- };
+ } ack;
};
struct rxrpc_host_header hdr; /* RxRPC packet header from this packet */
};
@@ -248,10 +251,9 @@ struct rxrpc_security {
struct rxrpc_key_token *);
/* Work out how much data we can store in a packet, given an estimate
- * of the amount of data remaining.
+ * of the amount of data remaining and allocate a data buffer.
*/
- int (*how_much_data)(struct rxrpc_call *, size_t,
- size_t *, size_t *, size_t *);
+ struct rxrpc_txbuf *(*alloc_txbuf)(struct rxrpc_call *call, size_t remaining, gfp_t gfp);
/* impose security on a packet */
int (*secure_packet)(struct rxrpc_call *, struct rxrpc_txbuf *);
@@ -292,6 +294,7 @@ struct rxrpc_local {
struct socket *socket; /* my UDP socket */
struct task_struct *io_thread;
struct completion io_thread_ready; /* Indication that the I/O thread started */
+ struct page_frag_cache tx_alloc; /* Tx control packet allocation (I/O thread only) */
struct rxrpc_sock *service; /* Service(s) listening on this endpoint */
#ifdef CONFIG_AF_RXRPC_INJECT_RX_DELAY
struct sk_buff_head rx_delay_queue; /* Delay injection queue */
@@ -352,8 +355,8 @@ struct rxrpc_peer {
u32 mdev_us; /* medium deviation */
u32 mdev_max_us; /* maximal mdev for the last rtt period */
u32 rttvar_us; /* smoothed mdev_max */
- u32 rto_j; /* Retransmission timeout in jiffies */
- u8 backoff; /* Backoff timeout */
+ u32 rto_us; /* Retransmission timeout in usec */
+ u8 backoff; /* Backoff timeout (as shift) */
u8 cong_ssthresh; /* Congestion slow-start threshold */
};
@@ -500,6 +503,8 @@ struct rxrpc_connection {
struct list_head proc_link; /* link in procfs list */
struct list_head link; /* link in master connection list */
struct sk_buff_head rx_queue; /* received conn-level packets */
+ struct page_frag_cache tx_data_alloc; /* Tx DATA packet allocation */
+ struct mutex tx_data_alloc_lock;
struct mutex security_lock; /* Lock for security management */
const struct rxrpc_security *security; /* applied security module */
@@ -618,17 +623,17 @@ struct rxrpc_call {
const struct rxrpc_security *security; /* applied security module */
struct mutex user_mutex; /* User access mutex */
struct sockaddr_rxrpc dest_srx; /* Destination address */
- unsigned long delay_ack_at; /* When DELAY ACK needs to happen */
- unsigned long ack_lost_at; /* When ACK is figured as lost */
- unsigned long resend_at; /* When next resend needs to happen */
- unsigned long ping_at; /* When next to send a ping */
- unsigned long keepalive_at; /* When next to send a keepalive ping */
- unsigned long expect_rx_by; /* When we expect to get a packet by */
- unsigned long expect_req_by; /* When we expect to get a request DATA packet by */
- unsigned long expect_term_by; /* When we expect call termination by */
- u32 next_rx_timo; /* Timeout for next Rx packet (jif) */
- u32 next_req_timo; /* Timeout for next Rx request packet (jif) */
- u32 hard_timo; /* Maximum lifetime or 0 (jif) */
+ ktime_t delay_ack_at; /* When DELAY ACK needs to happen */
+ ktime_t ack_lost_at; /* When ACK is figured as lost */
+ ktime_t resend_at; /* When next resend needs to happen */
+ ktime_t ping_at; /* When next to send a ping */
+ ktime_t keepalive_at; /* When next to send a keepalive ping */
+ ktime_t expect_rx_by; /* When we expect to get a packet by */
+ ktime_t expect_req_by; /* When we expect to get a request DATA packet by */
+ ktime_t expect_term_by; /* When we expect call termination by */
+ u32 next_rx_timo; /* Timeout for next Rx packet (ms) */
+ u32 next_req_timo; /* Timeout for next Rx request packet (ms) */
+ u32 hard_timo; /* Maximum lifetime or 0 (s) */
struct timer_list timer; /* Combined event timer */
struct work_struct destroyer; /* In-process-context destroyer */
rxrpc_notify_rx_t notify_rx; /* kernel service Rx notification function */
@@ -673,7 +678,7 @@ struct rxrpc_call {
rxrpc_seq_t tx_transmitted; /* Highest packet transmitted */
rxrpc_seq_t tx_prepared; /* Highest Tx slot prepared. */
rxrpc_seq_t tx_top; /* Highest Tx slot allocated. */
- u16 tx_backoff; /* Delay to insert due to Tx failure */
+ u16 tx_backoff; /* Delay to insert due to Tx failure (ms) */
u8 tx_winsize; /* Maximum size of Tx window */
#define RXRPC_TX_MAX_WINDOW 128
ktime_t tx_last_sent; /* Last time a transmission occurred */
@@ -788,40 +793,30 @@ struct rxrpc_send_params {
* Buffer of data to be output as a packet.
*/
struct rxrpc_txbuf {
- struct rcu_head rcu;
struct list_head call_link; /* Link in call->tx_sendmsg/tx_buffer */
struct list_head tx_link; /* Link in live Enc queue or Tx queue */
ktime_t last_sent; /* Time at which last transmitted */
refcount_t ref;
rxrpc_seq_t seq; /* Sequence number of this packet */
+ rxrpc_serial_t serial; /* Last serial number transmitted with */
unsigned int call_debug_id;
unsigned int debug_id;
unsigned int len; /* Amount of data in buffer */
unsigned int space; /* Remaining data space */
unsigned int offset; /* Offset of fill point */
- unsigned long flags;
-#define RXRPC_TXBUF_LAST 0 /* Set if last packet in Tx phase */
-#define RXRPC_TXBUF_RESENT 1 /* Set if has been resent */
+ unsigned int flags;
+#define RXRPC_TXBUF_WIRE_FLAGS 0xff /* The wire protocol flags */
+#define RXRPC_TXBUF_RESENT 0x100 /* Set if has been resent */
+ __be16 cksum; /* Checksum to go in header */
+ unsigned short ack_rwind; /* ACK receive window */
u8 /*enum rxrpc_propose_ack_trace*/ ack_why; /* If ack, why */
- struct {
- /* The packet for encrypting and DMA'ing. We align it such
- * that data[] aligns correctly for any crypto blocksize.
- */
- u8 pad[64 - sizeof(struct rxrpc_wire_header)];
- struct rxrpc_wire_header wire; /* Network-ready header */
- union {
- u8 data[RXRPC_JUMBO_DATALEN]; /* Data packet */
- struct {
- struct rxrpc_ackpacket ack;
- DECLARE_FLEX_ARRAY(u8, acks);
- };
- };
- } __aligned(64);
+ u8 nr_kvec; /* Amount of kvec[] used */
+ struct kvec kvec[3];
};
static inline bool rxrpc_sending_to_server(const struct rxrpc_txbuf *txb)
{
- return txb->wire.flags & RXRPC_CLIENT_INITIATED;
+ return txb->flags & RXRPC_CLIENT_INITIATED;
}
static inline bool rxrpc_sending_to_client(const struct rxrpc_txbuf *txb)
@@ -869,17 +864,11 @@ int rxrpc_user_charge_accept(struct rxrpc_sock *, unsigned long);
*/
void rxrpc_propose_ping(struct rxrpc_call *call, u32 serial,
enum rxrpc_propose_ack_trace why);
-void rxrpc_send_ACK(struct rxrpc_call *, u8, rxrpc_serial_t, enum rxrpc_propose_ack_trace);
void rxrpc_propose_delay_ACK(struct rxrpc_call *, rxrpc_serial_t,
enum rxrpc_propose_ack_trace);
void rxrpc_shrink_call_tx_buffer(struct rxrpc_call *);
void rxrpc_resend(struct rxrpc_call *call, struct sk_buff *ack_skb);
-void rxrpc_reduce_call_timer(struct rxrpc_call *call,
- unsigned long expire_at,
- unsigned long now,
- enum rxrpc_timer_trace why);
-
bool rxrpc_input_call_event(struct rxrpc_call *call, struct sk_buff *skb);
/*
@@ -1160,9 +1149,9 @@ static inline struct rxrpc_net *rxrpc_net(struct net *net)
/*
* output.c
*/
-int rxrpc_send_ack_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb);
+void rxrpc_send_ACK(struct rxrpc_call *call, u8 ack_reason,
+ rxrpc_serial_t serial, enum rxrpc_propose_ack_trace why);
int rxrpc_send_abort_packet(struct rxrpc_call *);
-int rxrpc_send_data_packet(struct rxrpc_call *, struct rxrpc_txbuf *);
void rxrpc_send_conn_abort(struct rxrpc_connection *conn);
void rxrpc_reject_packet(struct rxrpc_local *local, struct sk_buff *skb);
void rxrpc_send_keepalive(struct rxrpc_peer *);
@@ -1223,7 +1212,7 @@ static inline int rxrpc_abort_eproto(struct rxrpc_call *call,
*/
void rxrpc_peer_add_rtt(struct rxrpc_call *, enum rxrpc_rtt_rx_trace, int,
rxrpc_serial_t, rxrpc_serial_t, ktime_t, ktime_t);
-unsigned long rxrpc_get_rto_backoff(struct rxrpc_peer *, bool);
+ktime_t rxrpc_get_rto_backoff(struct rxrpc_peer *peer, bool retrans);
void rxrpc_peer_init_rtt(struct rxrpc_peer *);
/*
@@ -1295,8 +1284,9 @@ static inline void rxrpc_sysctl_exit(void) {}
* txbuf.c
*/
extern atomic_t rxrpc_nr_txbuf;
-struct rxrpc_txbuf *rxrpc_alloc_txbuf(struct rxrpc_call *call, u8 packet_type,
- gfp_t gfp);
+struct rxrpc_txbuf *rxrpc_alloc_data_txbuf(struct rxrpc_call *call, size_t data_size,
+ size_t data_align, gfp_t gfp);
+struct rxrpc_txbuf *rxrpc_alloc_ack_txbuf(struct rxrpc_call *call, size_t sack_size);
void rxrpc_get_txbuf(struct rxrpc_txbuf *txb, enum rxrpc_txbuf_trace what);
void rxrpc_see_txbuf(struct rxrpc_txbuf *txb, enum rxrpc_txbuf_trace what);
void rxrpc_put_txbuf(struct rxrpc_txbuf *txb, enum rxrpc_txbuf_trace what);
diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c
index 0f78544d043b..7bbb68504766 100644
--- a/net/rxrpc/call_event.c
+++ b/net/rxrpc/call_event.c
@@ -23,14 +23,14 @@
void rxrpc_propose_ping(struct rxrpc_call *call, u32 serial,
enum rxrpc_propose_ack_trace why)
{
- unsigned long now = jiffies;
- unsigned long ping_at = now + rxrpc_idle_ack_delay;
-
- if (time_before(ping_at, call->ping_at)) {
- WRITE_ONCE(call->ping_at, ping_at);
- rxrpc_reduce_call_timer(call, ping_at, now,
- rxrpc_timer_set_for_ping);
- trace_rxrpc_propose_ack(call, why, RXRPC_ACK_PING, serial);
+ ktime_t delay = ms_to_ktime(READ_ONCE(rxrpc_idle_ack_delay));
+ ktime_t now = ktime_get_real();
+ ktime_t ping_at = ktime_add(now, delay);
+
+ trace_rxrpc_propose_ack(call, why, RXRPC_ACK_PING, serial);
+ if (ktime_before(ping_at, call->ping_at)) {
+ call->ping_at = ping_at;
+ trace_rxrpc_timer_set(call, delay, rxrpc_timer_trace_ping);
}
}
@@ -40,62 +40,18 @@ void rxrpc_propose_ping(struct rxrpc_call *call, u32 serial,
void rxrpc_propose_delay_ACK(struct rxrpc_call *call, rxrpc_serial_t serial,
enum rxrpc_propose_ack_trace why)
{
- unsigned long expiry = rxrpc_soft_ack_delay;
- unsigned long now = jiffies, ack_at;
-
- if (rxrpc_soft_ack_delay < expiry)
- expiry = rxrpc_soft_ack_delay;
- if (call->peer->srtt_us != 0)
- ack_at = usecs_to_jiffies(call->peer->srtt_us >> 3);
- else
- ack_at = expiry;
-
- ack_at += READ_ONCE(call->tx_backoff);
- ack_at += now;
- if (time_before(ack_at, call->delay_ack_at)) {
- WRITE_ONCE(call->delay_ack_at, ack_at);
- rxrpc_reduce_call_timer(call, ack_at, now,
- rxrpc_timer_set_for_ack);
- }
+ ktime_t now = ktime_get_real(), delay;
trace_rxrpc_propose_ack(call, why, RXRPC_ACK_DELAY, serial);
-}
-
-/*
- * Queue an ACK for immediate transmission.
- */
-void rxrpc_send_ACK(struct rxrpc_call *call, u8 ack_reason,
- rxrpc_serial_t serial, enum rxrpc_propose_ack_trace why)
-{
- struct rxrpc_txbuf *txb;
-
- if (test_bit(RXRPC_CALL_DISCONNECTED, &call->flags))
- return;
-
- rxrpc_inc_stat(call->rxnet, stat_tx_acks[ack_reason]);
- txb = rxrpc_alloc_txbuf(call, RXRPC_PACKET_TYPE_ACK,
- rcu_read_lock_held() ? GFP_ATOMIC | __GFP_NOWARN : GFP_NOFS);
- if (!txb) {
- kleave(" = -ENOMEM");
- return;
- }
+ if (call->peer->srtt_us)
+ delay = (call->peer->srtt_us >> 3) * NSEC_PER_USEC;
+ else
+ delay = ms_to_ktime(READ_ONCE(rxrpc_soft_ack_delay));
+ delay = ktime_add_ms(delay, call->tx_backoff);
- txb->ack_why = why;
- txb->wire.seq = 0;
- txb->wire.type = RXRPC_PACKET_TYPE_ACK;
- txb->wire.flags |= RXRPC_SLOW_START_OK;
- txb->ack.bufferSpace = 0;
- txb->ack.maxSkew = 0;
- txb->ack.firstPacket = 0;
- txb->ack.previousPacket = 0;
- txb->ack.serial = htonl(serial);
- txb->ack.reason = ack_reason;
- txb->ack.nAcks = 0;
-
- trace_rxrpc_send_ack(call, why, ack_reason, serial);
- rxrpc_send_ack_packet(call, txb);
- rxrpc_put_txbuf(txb, rxrpc_txbuf_put_ack_tx);
+ call->delay_ack_at = ktime_add(now, delay);
+ trace_rxrpc_timer_set(call, delay, rxrpc_timer_trace_delayed_ack);
}
/*
@@ -114,25 +70,19 @@ void rxrpc_resend(struct rxrpc_call *call, struct sk_buff *ack_skb)
struct rxrpc_ackpacket *ack = NULL;
struct rxrpc_skb_priv *sp;
struct rxrpc_txbuf *txb;
- unsigned long resend_at;
- rxrpc_seq_t transmitted = READ_ONCE(call->tx_transmitted);
- ktime_t now, max_age, oldest, ack_ts;
- bool unacked = false;
+ rxrpc_seq_t transmitted = call->tx_transmitted;
+ ktime_t next_resend = KTIME_MAX, rto = ns_to_ktime(call->peer->rto_us * NSEC_PER_USEC);
+ ktime_t resend_at = KTIME_MAX, now, delay;
+ bool unacked = false, did_send = false;
unsigned int i;
- LIST_HEAD(retrans_queue);
_enter("{%d,%d}", call->acks_hard_ack, call->tx_top);
now = ktime_get_real();
- max_age = ktime_sub_us(now, jiffies_to_usecs(call->peer->rto_j));
- oldest = now;
if (list_empty(&call->tx_buffer))
goto no_resend;
- if (list_empty(&call->tx_buffer))
- goto no_further_resend;
-
trace_rxrpc_resend(call, ack_skb);
txb = list_first_entry(&call->tx_buffer, struct rxrpc_txbuf, call_link);
@@ -143,12 +93,12 @@ void rxrpc_resend(struct rxrpc_call *call, struct sk_buff *ack_skb)
sp = rxrpc_skb(ack_skb);
ack = (void *)ack_skb->data + sizeof(struct rxrpc_wire_header);
- for (i = 0; i < sp->nr_acks; i++) {
+ for (i = 0; i < sp->ack.nr_acks; i++) {
rxrpc_seq_t seq;
if (ack->acks[i] & 1)
continue;
- seq = sp->first_ack + i;
+ seq = sp->ack.first_ack + i;
if (after(txb->seq, transmitted))
break;
if (after(txb->seq, seq))
@@ -160,19 +110,23 @@ void rxrpc_resend(struct rxrpc_call *call, struct sk_buff *ack_skb)
goto no_further_resend;
found_txb:
- if (after(ntohl(txb->wire.serial), call->acks_highest_serial))
+ resend_at = ktime_add(txb->last_sent, rto);
+ if (after(txb->serial, call->acks_highest_serial)) {
+ if (ktime_after(resend_at, now) &&
+ ktime_before(resend_at, next_resend))
+ next_resend = resend_at;
continue; /* Ack point not yet reached */
+ }
rxrpc_see_txbuf(txb, rxrpc_txbuf_see_unacked);
- if (list_empty(&txb->tx_link)) {
- list_add_tail(&txb->tx_link, &retrans_queue);
- set_bit(RXRPC_TXBUF_RESENT, &txb->flags);
- }
+ trace_rxrpc_retransmit(call, txb->seq, txb->serial,
+ ktime_sub(resend_at, now));
- trace_rxrpc_retransmit(call, txb->seq,
- ktime_to_ns(ktime_sub(txb->last_sent,
- max_age)));
+ txb->flags |= RXRPC_TXBUF_RESENT;
+ rxrpc_transmit_one(call, txb);
+ did_send = true;
+ now = ktime_get_real();
if (list_is_last(&txb->call_link, &call->tx_buffer))
goto no_further_resend;
@@ -184,43 +138,46 @@ void rxrpc_resend(struct rxrpc_call *call, struct sk_buff *ack_skb)
* seen. Anything between the soft-ACK table and that point will get
* ACK'd or NACK'd in due course, so don't worry about it here; here we
* need to consider retransmitting anything beyond that point.
- *
- * Note that ACK for a packet can beat the update of tx_transmitted.
*/
- if (after_eq(READ_ONCE(call->acks_prev_seq), READ_ONCE(call->tx_transmitted)))
+ if (after_eq(call->acks_prev_seq, call->tx_transmitted))
goto no_further_resend;
list_for_each_entry_from(txb, &call->tx_buffer, call_link) {
- if (before_eq(txb->seq, READ_ONCE(call->acks_prev_seq)))
+ resend_at = ktime_add(txb->last_sent, rto);
+
+ if (before_eq(txb->seq, call->acks_prev_seq))
continue;
- if (after(txb->seq, READ_ONCE(call->tx_transmitted)))
+ if (after(txb->seq, call->tx_transmitted))
break; /* Not transmitted yet */
if (ack && ack->reason == RXRPC_ACK_PING_RESPONSE &&
- before(ntohl(txb->wire.serial), ntohl(ack->serial)))
+ before(txb->serial, ntohl(ack->serial)))
goto do_resend; /* Wasn't accounted for by a more recent ping. */
- if (ktime_after(txb->last_sent, max_age)) {
- if (ktime_before(txb->last_sent, oldest))
- oldest = txb->last_sent;
+ if (ktime_after(resend_at, now)) {
+ if (ktime_before(resend_at, next_resend))
+ next_resend = resend_at;
continue;
}
do_resend:
unacked = true;
- if (list_empty(&txb->tx_link)) {
- list_add_tail(&txb->tx_link, &retrans_queue);
- set_bit(RXRPC_TXBUF_RESENT, &txb->flags);
- rxrpc_inc_stat(call->rxnet, stat_tx_data_retrans);
- }
+
+ txb->flags |= RXRPC_TXBUF_RESENT;
+ rxrpc_transmit_one(call, txb);
+ did_send = true;
+ rxrpc_inc_stat(call->rxnet, stat_tx_data_retrans);
+ now = ktime_get_real();
}
no_further_resend:
no_resend:
- resend_at = nsecs_to_jiffies(ktime_to_ns(ktime_sub(now, oldest)));
- resend_at += jiffies + rxrpc_get_rto_backoff(call->peer,
- !list_empty(&retrans_queue));
- WRITE_ONCE(call->resend_at, resend_at);
+ if (resend_at < KTIME_MAX) {
+ delay = rxrpc_get_rto_backoff(call->peer, did_send);
+ resend_at = ktime_add(resend_at, delay);
+ trace_rxrpc_timer_set(call, resend_at - now, rxrpc_timer_trace_resend_reset);
+ }
+ call->resend_at = resend_at;
if (unacked)
rxrpc_congestion_timeout(call);
@@ -229,25 +186,15 @@ no_resend:
* that an ACK got lost somewhere. Send a ping to find out instead of
* retransmitting data.
*/
- if (list_empty(&retrans_queue)) {
- rxrpc_reduce_call_timer(call, resend_at, jiffies,
- rxrpc_timer_set_for_resend);
- ack_ts = ktime_sub(now, call->acks_latest_ts);
- if (ktime_to_us(ack_ts) < (call->peer->srtt_us >> 3))
- goto out;
- rxrpc_send_ACK(call, RXRPC_ACK_PING, 0,
- rxrpc_propose_ack_ping_for_lost_ack);
- goto out;
- }
+ if (!did_send) {
+ ktime_t next_ping = ktime_add_us(call->acks_latest_ts,
+ call->peer->srtt_us >> 3);
- /* Retransmit the queue */
- while ((txb = list_first_entry_or_null(&retrans_queue,
- struct rxrpc_txbuf, tx_link))) {
- list_del_init(&txb->tx_link);
- rxrpc_transmit_one(call, txb);
+ if (ktime_sub(next_ping, now) <= 0)
+ rxrpc_send_ACK(call, RXRPC_ACK_PING, 0,
+ rxrpc_propose_ack_ping_for_0_retrans);
}
-out:
_leave("");
}
@@ -257,13 +204,11 @@ out:
*/
static void rxrpc_begin_service_reply(struct rxrpc_call *call)
{
- unsigned long now = jiffies;
-
rxrpc_set_call_state(call, RXRPC_CALL_SERVER_SEND_REPLY);
- WRITE_ONCE(call->delay_ack_at, now + MAX_JIFFY_OFFSET);
if (call->ackr_reason == RXRPC_ACK_DELAY)
call->ackr_reason = 0;
- trace_rxrpc_timer(call, rxrpc_timer_init_for_send_reply, now);
+ call->delay_ack_at = KTIME_MAX;
+ trace_rxrpc_timer_can(call, rxrpc_timer_trace_delayed_ack);
}
/*
@@ -320,7 +265,7 @@ static void rxrpc_decant_prepared_tx(struct rxrpc_call *call)
call->tx_top = txb->seq;
list_add_tail(&txb->call_link, &call->tx_buffer);
- if (txb->wire.flags & RXRPC_LAST_PACKET)
+ if (txb->flags & RXRPC_LAST_PACKET)
rxrpc_close_tx_phase(call);
rxrpc_transmit_one(call, txb);
@@ -372,8 +317,8 @@ static void rxrpc_send_initial_ping(struct rxrpc_call *call)
*/
bool rxrpc_input_call_event(struct rxrpc_call *call, struct sk_buff *skb)
{
- unsigned long now, next, t;
- bool resend = false, expired = false;
+ ktime_t now, t;
+ bool resend = false;
s32 abort_code;
rxrpc_see_call(call, rxrpc_call_see_input);
@@ -397,70 +342,73 @@ bool rxrpc_input_call_event(struct rxrpc_call *call, struct sk_buff *skb)
if (skb && skb->mark == RXRPC_SKB_MARK_ERROR)
goto out;
+ if (skb)
+ rxrpc_input_call_packet(call, skb);
+
/* If we see our async-event poke, check for timeout trippage. */
- now = jiffies;
- t = READ_ONCE(call->expect_rx_by);
- if (time_after_eq(now, t)) {
- trace_rxrpc_timer(call, rxrpc_timer_exp_normal, now);
- expired = true;
+ now = ktime_get_real();
+ t = ktime_sub(call->expect_rx_by, now);
+ if (t <= 0) {
+ trace_rxrpc_timer_exp(call, t, rxrpc_timer_trace_expect_rx);
+ goto expired;
}
- t = READ_ONCE(call->expect_req_by);
- if (__rxrpc_call_state(call) == RXRPC_CALL_SERVER_RECV_REQUEST &&
- time_after_eq(now, t)) {
- trace_rxrpc_timer(call, rxrpc_timer_exp_idle, now);
- expired = true;
+ t = ktime_sub(call->expect_req_by, now);
+ if (t <= 0) {
+ call->expect_req_by = KTIME_MAX;
+ if (__rxrpc_call_state(call) == RXRPC_CALL_SERVER_RECV_REQUEST) {
+ trace_rxrpc_timer_exp(call, t, rxrpc_timer_trace_idle);
+ goto expired;
+ }
}
- t = READ_ONCE(call->expect_term_by);
- if (time_after_eq(now, t)) {
- trace_rxrpc_timer(call, rxrpc_timer_exp_hard, now);
- expired = true;
+ t = ktime_sub(READ_ONCE(call->expect_term_by), now);
+ if (t <= 0) {
+ trace_rxrpc_timer_exp(call, t, rxrpc_timer_trace_hard);
+ goto expired;
}
- t = READ_ONCE(call->delay_ack_at);
- if (time_after_eq(now, t)) {
- trace_rxrpc_timer(call, rxrpc_timer_exp_ack, now);
- cmpxchg(&call->delay_ack_at, t, now + MAX_JIFFY_OFFSET);
+ t = ktime_sub(call->delay_ack_at, now);
+ if (t <= 0) {
+ trace_rxrpc_timer_exp(call, t, rxrpc_timer_trace_delayed_ack);
+ call->delay_ack_at = KTIME_MAX;
rxrpc_send_ACK(call, RXRPC_ACK_DELAY, 0,
- rxrpc_propose_ack_ping_for_lost_ack);
+ rxrpc_propose_ack_delayed_ack);
}
- t = READ_ONCE(call->ack_lost_at);
- if (time_after_eq(now, t)) {
- trace_rxrpc_timer(call, rxrpc_timer_exp_lost_ack, now);
- cmpxchg(&call->ack_lost_at, t, now + MAX_JIFFY_OFFSET);
+ t = ktime_sub(call->ack_lost_at, now);
+ if (t <= 0) {
+ trace_rxrpc_timer_exp(call, t, rxrpc_timer_trace_lost_ack);
+ call->ack_lost_at = KTIME_MAX;
set_bit(RXRPC_CALL_EV_ACK_LOST, &call->events);
}
- t = READ_ONCE(call->keepalive_at);
- if (time_after_eq(now, t)) {
- trace_rxrpc_timer(call, rxrpc_timer_exp_keepalive, now);
- cmpxchg(&call->keepalive_at, t, now + MAX_JIFFY_OFFSET);
+ t = ktime_sub(call->ping_at, now);
+ if (t <= 0) {
+ trace_rxrpc_timer_exp(call, t, rxrpc_timer_trace_ping);
+ call->ping_at = KTIME_MAX;
rxrpc_send_ACK(call, RXRPC_ACK_PING, 0,
rxrpc_propose_ack_ping_for_keepalive);
}
- t = READ_ONCE(call->ping_at);
- if (time_after_eq(now, t)) {
- trace_rxrpc_timer(call, rxrpc_timer_exp_ping, now);
- cmpxchg(&call->ping_at, t, now + MAX_JIFFY_OFFSET);
- rxrpc_send_ACK(call, RXRPC_ACK_PING, 0,
- rxrpc_propose_ack_ping_for_keepalive);
- }
-
- t = READ_ONCE(call->resend_at);
- if (time_after_eq(now, t)) {
- trace_rxrpc_timer(call, rxrpc_timer_exp_resend, now);
- cmpxchg(&call->resend_at, t, now + MAX_JIFFY_OFFSET);
+ t = ktime_sub(call->resend_at, now);
+ if (t <= 0) {
+ trace_rxrpc_timer_exp(call, t, rxrpc_timer_trace_resend);
+ call->resend_at = KTIME_MAX;
resend = true;
}
- if (skb)
- rxrpc_input_call_packet(call, skb);
-
rxrpc_transmit_some_data(call);
+ now = ktime_get_real();
+ t = ktime_sub(call->keepalive_at, now);
+ if (t <= 0) {
+ trace_rxrpc_timer_exp(call, t, rxrpc_timer_trace_keepalive);
+ call->keepalive_at = KTIME_MAX;
+ rxrpc_send_ACK(call, RXRPC_ACK_PING, 0,
+ rxrpc_propose_ack_ping_for_keepalive);
+ }
+
if (skb) {
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
@@ -472,24 +420,13 @@ bool rxrpc_input_call_event(struct rxrpc_call *call, struct sk_buff *skb)
rxrpc_send_initial_ping(call);
/* Process events */
- if (expired) {
- if (test_bit(RXRPC_CALL_RX_HEARD, &call->flags) &&
- (int)call->conn->hi_serial - (int)call->rx_serial > 0) {
- trace_rxrpc_call_reset(call);
- rxrpc_abort_call(call, 0, RX_CALL_DEAD, -ECONNRESET,
- rxrpc_abort_call_reset);
- } else {
- rxrpc_abort_call(call, 0, RX_CALL_TIMEOUT, -ETIME,
- rxrpc_abort_call_timeout);
- }
- goto out;
- }
-
if (test_and_clear_bit(RXRPC_CALL_EV_ACK_LOST, &call->events))
rxrpc_send_ACK(call, RXRPC_ACK_PING, 0,
rxrpc_propose_ack_ping_for_lost_ack);
- if (resend && __rxrpc_call_state(call) != RXRPC_CALL_CLIENT_RECV_REPLY)
+ if (resend &&
+ __rxrpc_call_state(call) != RXRPC_CALL_CLIENT_RECV_REPLY &&
+ !test_bit(RXRPC_CALL_TX_ALL_ACKED, &call->flags))
rxrpc_resend(call, NULL);
if (test_and_clear_bit(RXRPC_CALL_RX_IS_IDLE, &call->flags))
@@ -511,23 +448,33 @@ bool rxrpc_input_call_event(struct rxrpc_call *call, struct sk_buff *skb)
/* Make sure the timer is restarted */
if (!__rxrpc_call_is_complete(call)) {
- next = call->expect_rx_by;
+ ktime_t next = READ_ONCE(call->expect_term_by), delay;
-#define set(T) { t = READ_ONCE(T); if (time_before(t, next)) next = t; }
+#define set(T) { ktime_t _t = (T); if (ktime_before(_t, next)) next = _t; }
set(call->expect_req_by);
- set(call->expect_term_by);
+ set(call->expect_rx_by);
set(call->delay_ack_at);
set(call->ack_lost_at);
set(call->resend_at);
set(call->keepalive_at);
set(call->ping_at);
- now = jiffies;
- if (time_after_eq(now, next))
+ now = ktime_get_real();
+ delay = ktime_sub(next, now);
+ if (delay <= 0) {
rxrpc_poke_call(call, rxrpc_call_poke_timer_now);
-
- rxrpc_reduce_call_timer(call, next, now, rxrpc_timer_restart);
+ } else {
+ unsigned long nowj = jiffies, delayj, nextj;
+
+ delayj = max(nsecs_to_jiffies(delay), 1);
+ nextj = nowj + delayj;
+ if (time_before(nextj, call->timer.expires) ||
+ !timer_pending(&call->timer)) {
+ trace_rxrpc_timer_restart(call, delay, delayj);
+ timer_reduce(&call->timer, nextj);
+ }
+ }
}
out:
@@ -542,4 +489,16 @@ out:
rxrpc_shrink_call_tx_buffer(call);
_leave("");
return true;
+
+expired:
+ if (test_bit(RXRPC_CALL_RX_HEARD, &call->flags) &&
+ (int)call->conn->hi_serial - (int)call->rx_serial > 0) {
+ trace_rxrpc_call_reset(call);
+ rxrpc_abort_call(call, 0, RX_CALL_DEAD, -ECONNRESET,
+ rxrpc_abort_call_reset);
+ } else {
+ rxrpc_abort_call(call, 0, RX_CALL_TIMEOUT, -ETIME,
+ rxrpc_abort_call_timeout);
+ }
+ goto out;
}
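
The call timer itself remains a jiffies-based timer_list, so the restart path
at the end of rxrpc_input_call_event() converts the soonest ktime deadline
back to jiffies, clamped to at least one tick, and arms it with
timer_reduce() so a later expiry can never push out an earlier one. The
conversion step, as a sketch:

    /* Arm a jiffies timer from a relative ktime delay. */
    static void arm_timer(struct timer_list *timer, ktime_t delay)
    {
            unsigned long delayj;

            delayj = max(nsecs_to_jiffies(ktime_to_ns(delay)), 1UL);
            /* timer_reduce() only ever moves the expiry earlier. */
            timer_reduce(timer, jiffies + delayj);
    }
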
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
index 9fc9a6c3f685..01fa71e8b1f7 100644
--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c
@@ -70,20 +70,11 @@ static void rxrpc_call_timer_expired(struct timer_list *t)
_enter("%d", call->debug_id);
if (!__rxrpc_call_is_complete(call)) {
- trace_rxrpc_timer_expired(call, jiffies);
+ trace_rxrpc_timer_expired(call);
rxrpc_poke_call(call, rxrpc_call_poke_timer);
}
}
-void rxrpc_reduce_call_timer(struct rxrpc_call *call,
- unsigned long expire_at,
- unsigned long now,
- enum rxrpc_timer_trace why)
-{
- trace_rxrpc_timer(call, why, now);
- timer_reduce(&call->timer, expire_at);
-}
-
static struct lock_class_key rxrpc_call_user_mutex_lock_class_key;
static void rxrpc_destroy_call(struct work_struct *);
@@ -163,12 +154,20 @@ struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp,
spin_lock_init(&call->notify_lock);
spin_lock_init(&call->tx_lock);
refcount_set(&call->ref, 1);
- call->debug_id = debug_id;
- call->tx_total_len = -1;
- call->next_rx_timo = 20 * HZ;
- call->next_req_timo = 1 * HZ;
- call->ackr_window = 1;
- call->ackr_wtop = 1;
+ call->debug_id = debug_id;
+ call->tx_total_len = -1;
+ call->next_rx_timo = 20 * HZ;
+ call->next_req_timo = 1 * HZ;
+ call->ackr_window = 1;
+ call->ackr_wtop = 1;
+ call->delay_ack_at = KTIME_MAX;
+ call->ack_lost_at = KTIME_MAX;
+ call->resend_at = KTIME_MAX;
+ call->ping_at = KTIME_MAX;
+ call->keepalive_at = KTIME_MAX;
+ call->expect_rx_by = KTIME_MAX;
+ call->expect_req_by = KTIME_MAX;
+ call->expect_term_by = KTIME_MAX;
memset(&call->sock_node, 0xed, sizeof(call->sock_node));
@@ -226,11 +225,11 @@ static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx,
__set_bit(RXRPC_CALL_EXCLUSIVE, &call->flags);
if (p->timeouts.normal)
- call->next_rx_timo = min(msecs_to_jiffies(p->timeouts.normal), 1UL);
+ call->next_rx_timo = min(p->timeouts.normal, 1);
if (p->timeouts.idle)
- call->next_req_timo = min(msecs_to_jiffies(p->timeouts.idle), 1UL);
+ call->next_req_timo = min(p->timeouts.idle, 1);
if (p->timeouts.hard)
- call->hard_timo = p->timeouts.hard * HZ;
+ call->hard_timo = p->timeouts.hard;
ret = rxrpc_init_client_call_security(call);
if (ret < 0) {
@@ -253,18 +252,13 @@ static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx,
*/
void rxrpc_start_call_timer(struct rxrpc_call *call)
{
- unsigned long now = jiffies;
- unsigned long j = now + MAX_JIFFY_OFFSET;
-
- call->delay_ack_at = j;
- call->ack_lost_at = j;
- call->resend_at = j;
- call->ping_at = j;
- call->keepalive_at = j;
- call->expect_rx_by = j;
- call->expect_req_by = j;
- call->expect_term_by = j + call->hard_timo;
- call->timer.expires = now;
+ if (call->hard_timo) {
+ ktime_t delay = ms_to_ktime(call->hard_timo * 1000);
+
+ call->expect_term_by = ktime_add(ktime_get_real(), delay);
+ trace_rxrpc_timer_set(call, delay, rxrpc_timer_trace_hard);
+ }
+ call->timer.expires = jiffies;
}
/*
diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c
index 3b9b267a4431..d25bf1cf3670 100644
--- a/net/rxrpc/conn_client.c
+++ b/net/rxrpc/conn_client.c
@@ -636,7 +636,7 @@ void rxrpc_disconnect_client_call(struct rxrpc_bundle *bundle, struct rxrpc_call
test_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
unsigned long final_ack_at = jiffies + 2;
- WRITE_ONCE(chan->final_ack_at, final_ack_at);
+ chan->final_ack_at = final_ack_at;
smp_wmb(); /* vs rxrpc_process_delayed_final_acks() */
set_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags);
rxrpc_reduce_conn_timer(conn, final_ack_at);
@@ -770,7 +770,7 @@ next:
conn_expires_at = conn->idle_timestamp + expiry;
- now = READ_ONCE(jiffies);
+ now = jiffies;
if (time_after(conn_expires_at, now))
goto not_yet_expired;
}
diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c
index 1f251d758cb9..598b4ee389fc 100644
--- a/net/rxrpc/conn_event.c
+++ b/net/rxrpc/conn_event.c
@@ -88,7 +88,7 @@ void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
struct rxrpc_ackpacket ack;
};
} __attribute__((packed)) pkt;
- struct rxrpc_ackinfo ack_info;
+ struct rxrpc_acktrailer trailer;
size_t len;
int ret, ioc;
u32 serial, mtu, call_id, padding;
@@ -122,8 +122,8 @@ void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
iov[0].iov_len = sizeof(pkt.whdr);
iov[1].iov_base = &padding;
iov[1].iov_len = 3;
- iov[2].iov_base = &ack_info;
- iov[2].iov_len = sizeof(ack_info);
+ iov[2].iov_base = &trailer;
+ iov[2].iov_len = sizeof(trailer);
serial = rxrpc_get_next_serial(conn);
@@ -158,14 +158,14 @@ void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
pkt.ack.serial = htonl(skb ? sp->hdr.serial : 0);
pkt.ack.reason = skb ? RXRPC_ACK_DUPLICATE : RXRPC_ACK_IDLE;
pkt.ack.nAcks = 0;
- ack_info.rxMTU = htonl(rxrpc_rx_mtu);
- ack_info.maxMTU = htonl(mtu);
- ack_info.rwind = htonl(rxrpc_rx_window_size);
- ack_info.jumbo_max = htonl(rxrpc_rx_jumbo_max);
+ trailer.maxMTU = htonl(rxrpc_rx_mtu);
+ trailer.ifMTU = htonl(mtu);
+ trailer.rwind = htonl(rxrpc_rx_window_size);
+ trailer.jumbo_max = htonl(rxrpc_rx_jumbo_max);
pkt.whdr.flags |= RXRPC_SLOW_START_OK;
padding = 0;
iov[0].iov_len += sizeof(pkt.ack);
- len += sizeof(pkt.ack) + 3 + sizeof(ack_info);
+ len += sizeof(pkt.ack) + 3 + sizeof(trailer);
ioc = 3;
trace_rxrpc_tx_ack(chan->call_debug_id, serial,
diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c
index df8a271948a1..0af4642aeec4 100644
--- a/net/rxrpc/conn_object.c
+++ b/net/rxrpc/conn_object.c
@@ -68,6 +68,7 @@ struct rxrpc_connection *rxrpc_alloc_connection(struct rxrpc_net *rxnet,
INIT_LIST_HEAD(&conn->proc_link);
INIT_LIST_HEAD(&conn->link);
mutex_init(&conn->security_lock);
+ mutex_init(&conn->tx_data_alloc_lock);
skb_queue_head_init(&conn->rx_queue);
conn->rxnet = rxnet;
conn->security = &rxrpc_no_security;
@@ -341,6 +342,9 @@ static void rxrpc_clean_up_connection(struct work_struct *work)
*/
rxrpc_purge_queue(&conn->rx_queue);
+ if (conn->tx_data_alloc.va)
+ __page_frag_cache_drain(virt_to_page(conn->tx_data_alloc.va),
+ conn->tx_data_alloc.pagecnt_bias);
call_rcu(&conn->rcu, rxrpc_rcu_free_connection);
}
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
index 9691de00ade7..3dedb8c0618c 100644
--- a/net/rxrpc/input.c
+++ b/net/rxrpc/input.c
@@ -212,7 +212,7 @@ static bool rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
list_for_each_entry_rcu(txb, &call->tx_buffer, call_link, false) {
if (before_eq(txb->seq, call->acks_hard_ack))
continue;
- if (test_bit(RXRPC_TXBUF_LAST, &txb->flags)) {
+ if (txb->flags & RXRPC_LAST_PACKET) {
set_bit(RXRPC_CALL_TX_LAST, &call->flags);
rot_last = true;
}
@@ -252,6 +252,9 @@ static void rxrpc_end_tx_phase(struct rxrpc_call *call, bool reply_begun,
{
ASSERT(test_bit(RXRPC_CALL_TX_LAST, &call->flags));
+ call->resend_at = KTIME_MAX;
+ trace_rxrpc_timer_can(call, rxrpc_timer_trace_resend);
+
if (unlikely(call->cong_last_nack)) {
rxrpc_free_skb(call->cong_last_nack, rxrpc_skb_put_last_nack);
call->cong_last_nack = NULL;
@@ -288,15 +291,11 @@ static void rxrpc_end_tx_phase(struct rxrpc_call *call, bool reply_begun,
static bool rxrpc_receiving_reply(struct rxrpc_call *call)
{
struct rxrpc_ack_summary summary = { 0 };
- unsigned long now, timo;
rxrpc_seq_t top = READ_ONCE(call->tx_top);
if (call->ackr_reason) {
- now = jiffies;
- timo = now + MAX_JIFFY_OFFSET;
-
- WRITE_ONCE(call->delay_ack_at, timo);
- trace_rxrpc_timer(call, rxrpc_timer_init_for_reply, now);
+ call->delay_ack_at = KTIME_MAX;
+ trace_rxrpc_timer_can(call, rxrpc_timer_trace_delayed_ack);
}
if (!test_bit(RXRPC_CALL_TX_LAST, &call->flags)) {
@@ -329,7 +328,7 @@ static void rxrpc_end_rx_phase(struct rxrpc_call *call, rxrpc_serial_t serial)
case RXRPC_CALL_SERVER_RECV_REQUEST:
rxrpc_set_call_state(call, RXRPC_CALL_SERVER_ACK_REQUEST);
- call->expect_req_by = jiffies + MAX_JIFFY_OFFSET;
+ call->expect_req_by = KTIME_MAX;
rxrpc_propose_delay_ACK(call, serial, rxrpc_propose_ack_processing_op);
break;
@@ -589,14 +588,12 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb)
case RXRPC_CALL_SERVER_RECV_REQUEST: {
unsigned long timo = READ_ONCE(call->next_req_timo);
- unsigned long now, expect_req_by;
if (timo) {
- now = jiffies;
- expect_req_by = now + timo;
- WRITE_ONCE(call->expect_req_by, expect_req_by);
- rxrpc_reduce_call_timer(call, expect_req_by, now,
- rxrpc_timer_set_for_idle);
+ ktime_t delay = ms_to_ktime(timo);
+
+ call->expect_req_by = ktime_add(ktime_get_real(), delay);
+ trace_rxrpc_timer_set(call, delay, rxrpc_timer_trace_idle);
}
break;
}
@@ -670,14 +667,14 @@ static void rxrpc_complete_rtt_probe(struct rxrpc_call *call,
/*
* Process the extra information that may be appended to an ACK packet
*/
-static void rxrpc_input_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
- struct rxrpc_ackinfo *ackinfo)
+static void rxrpc_input_ack_trailer(struct rxrpc_call *call, struct sk_buff *skb,
+ struct rxrpc_acktrailer *trailer)
{
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
struct rxrpc_peer *peer;
unsigned int mtu;
bool wake = false;
- u32 rwind = ntohl(ackinfo->rwind);
+ u32 rwind = ntohl(trailer->rwind);
if (rwind > RXRPC_TX_MAX_WINDOW)
rwind = RXRPC_TX_MAX_WINDOW;
@@ -691,7 +688,7 @@ static void rxrpc_input_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
if (call->cong_ssthresh > rwind)
call->cong_ssthresh = rwind;
- mtu = min(ntohl(ackinfo->rxMTU), ntohl(ackinfo->maxMTU));
+ mtu = min(ntohl(trailer->maxMTU), ntohl(trailer->ifMTU));
peer = call->peer;
if (mtu < peer->maxdata) {
@@ -713,20 +710,19 @@ static rxrpc_seq_t rxrpc_input_check_prev_ack(struct rxrpc_call *call,
rxrpc_seq_t seq)
{
struct sk_buff *skb = call->cong_last_nack;
- struct rxrpc_ackpacket ack;
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
unsigned int i, new_acks = 0, retained_nacks = 0;
- rxrpc_seq_t old_seq = sp->first_ack;
- u8 *acks = skb->data + sizeof(struct rxrpc_wire_header) + sizeof(ack);
+ rxrpc_seq_t old_seq = sp->ack.first_ack;
+ u8 *acks = skb->data + sizeof(struct rxrpc_wire_header) + sizeof(struct rxrpc_ackpacket);
- if (after_eq(seq, old_seq + sp->nr_acks)) {
- summary->nr_new_acks += sp->nr_nacks;
- summary->nr_new_acks += seq - (old_seq + sp->nr_acks);
+ if (after_eq(seq, old_seq + sp->ack.nr_acks)) {
+ summary->nr_new_acks += sp->ack.nr_nacks;
+ summary->nr_new_acks += seq - (old_seq + sp->ack.nr_acks);
summary->nr_retained_nacks = 0;
} else if (seq == old_seq) {
- summary->nr_retained_nacks = sp->nr_nacks;
+ summary->nr_retained_nacks = sp->ack.nr_nacks;
} else {
- for (i = 0; i < sp->nr_acks; i++) {
+ for (i = 0; i < sp->ack.nr_acks; i++) {
if (acks[i] == RXRPC_ACK_TYPE_NACK) {
if (before(old_seq + i, seq))
new_acks++;
@@ -739,7 +735,7 @@ static rxrpc_seq_t rxrpc_input_check_prev_ack(struct rxrpc_call *call,
summary->nr_retained_nacks = retained_nacks;
}
- return old_seq + sp->nr_acks;
+ return old_seq + sp->ack.nr_acks;
}
/*
@@ -759,10 +755,10 @@ static void rxrpc_input_soft_acks(struct rxrpc_call *call,
{
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
unsigned int i, old_nacks = 0;
- rxrpc_seq_t lowest_nak = seq + sp->nr_acks;
+ rxrpc_seq_t lowest_nak = seq + sp->ack.nr_acks;
u8 *acks = skb->data + sizeof(struct rxrpc_wire_header) + sizeof(struct rxrpc_ackpacket);
- for (i = 0; i < sp->nr_acks; i++) {
+ for (i = 0; i < sp->ack.nr_acks; i++) {
if (acks[i] == RXRPC_ACK_TYPE_ACK) {
summary->nr_acks++;
if (after_eq(seq, since))
@@ -774,7 +770,7 @@ static void rxrpc_input_soft_acks(struct rxrpc_call *call,
old_nacks++;
} else {
summary->nr_new_nacks++;
- sp->nr_nacks++;
+ sp->ack.nr_nacks++;
}
if (before(seq, lowest_nak))
@@ -835,38 +831,32 @@ static bool rxrpc_is_ack_valid(struct rxrpc_call *call,
static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
{
struct rxrpc_ack_summary summary = { 0 };
- struct rxrpc_ackpacket ack;
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
- struct rxrpc_ackinfo info;
+ struct rxrpc_acktrailer trailer;
rxrpc_serial_t ack_serial, acked_serial;
rxrpc_seq_t first_soft_ack, hard_ack, prev_pkt, since;
int nr_acks, offset, ioffset;
_enter("");
- offset = sizeof(struct rxrpc_wire_header);
- if (skb_copy_bits(skb, offset, &ack, sizeof(ack)) < 0)
- return rxrpc_proto_abort(call, 0, rxrpc_badmsg_short_ack);
- offset += sizeof(ack);
-
- ack_serial = sp->hdr.serial;
- acked_serial = ntohl(ack.serial);
- first_soft_ack = ntohl(ack.firstPacket);
- prev_pkt = ntohl(ack.previousPacket);
- hard_ack = first_soft_ack - 1;
- nr_acks = ack.nAcks;
- sp->first_ack = first_soft_ack;
- sp->nr_acks = nr_acks;
- summary.ack_reason = (ack.reason < RXRPC_ACK__INVALID ?
- ack.reason : RXRPC_ACK__INVALID);
+ offset = sizeof(struct rxrpc_wire_header) + sizeof(struct rxrpc_ackpacket);
+
+ ack_serial = sp->hdr.serial;
+ acked_serial = sp->ack.acked_serial;
+ first_soft_ack = sp->ack.first_ack;
+ prev_pkt = sp->ack.prev_ack;
+ nr_acks = sp->ack.nr_acks;
+ hard_ack = first_soft_ack - 1;
+ summary.ack_reason = (sp->ack.reason < RXRPC_ACK__INVALID ?
+ sp->ack.reason : RXRPC_ACK__INVALID);
trace_rxrpc_rx_ack(call, ack_serial, acked_serial,
first_soft_ack, prev_pkt,
summary.ack_reason, nr_acks);
- rxrpc_inc_stat(call->rxnet, stat_rx_acks[ack.reason]);
+ rxrpc_inc_stat(call->rxnet, stat_rx_acks[summary.ack_reason]);
if (acked_serial != 0) {
- switch (ack.reason) {
+ switch (summary.ack_reason) {
case RXRPC_ACK_PING_RESPONSE:
rxrpc_complete_rtt_probe(call, skb->tstamp, acked_serial, ack_serial,
rxrpc_rtt_rx_ping_response);
@@ -886,7 +876,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
* indicates that the client address changed due to NAT. The server
* lost the call because it switched to a different peer.
*/
- if (unlikely(ack.reason == RXRPC_ACK_EXCEEDS_WINDOW) &&
+ if (unlikely(summary.ack_reason == RXRPC_ACK_EXCEEDS_WINDOW) &&
first_soft_ack == 1 &&
prev_pkt == 0 &&
rxrpc_is_client_call(call)) {
@@ -899,7 +889,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
* indicate a change of address. However, we can retransmit the call
* if we still have it buffered to the beginning.
*/
- if (unlikely(ack.reason == RXRPC_ACK_OUT_OF_SEQUENCE) &&
+ if (unlikely(summary.ack_reason == RXRPC_ACK_OUT_OF_SEQUENCE) &&
first_soft_ack == 1 &&
prev_pkt == 0 &&
call->acks_hard_ack == 0 &&
@@ -917,11 +907,11 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
goto send_response;
}
- info.rxMTU = 0;
+ trailer.maxMTU = 0;
ioffset = offset + nr_acks + 3;
- if (skb->len >= ioffset + sizeof(info) &&
- skb_copy_bits(skb, ioffset, &info, sizeof(info)) < 0)
- return rxrpc_proto_abort(call, 0, rxrpc_badmsg_short_ack_info);
+ if (skb->len >= ioffset + sizeof(trailer) &&
+ skb_copy_bits(skb, ioffset, &trailer, sizeof(trailer)) < 0)
+ return rxrpc_proto_abort(call, 0, rxrpc_badmsg_short_ack_trailer);
if (nr_acks > 0)
skb_condense(skb);
@@ -940,7 +930,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
call->acks_first_seq = first_soft_ack;
call->acks_prev_seq = prev_pkt;
- switch (ack.reason) {
+ switch (summary.ack_reason) {
case RXRPC_ACK_PING:
break;
default:
@@ -950,8 +940,8 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
}
/* Parse rwind and mtu sizes if provided. */
- if (info.rxMTU)
- rxrpc_input_ackinfo(call, skb, &info);
+ if (trailer.maxMTU)
+ rxrpc_input_ack_trailer(call, skb, &trailer);
if (first_soft_ack == 0)
return rxrpc_proto_abort(call, 0, rxrpc_eproto_ackr_zero);
@@ -997,7 +987,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
rxrpc_congestion_management(call, skb, &summary, acked_serial);
send_response:
- if (ack.reason == RXRPC_ACK_PING)
+ if (summary.ack_reason == RXRPC_ACK_PING)
rxrpc_send_ACK(call, RXRPC_ACK_PING_RESPONSE, ack_serial,
rxrpc_propose_ack_respond_to_ping);
else if (sp->hdr.flags & RXRPC_REQUEST_ACK)
@@ -1048,12 +1038,10 @@ void rxrpc_input_call_packet(struct rxrpc_call *call, struct sk_buff *skb)
timo = READ_ONCE(call->next_rx_timo);
if (timo) {
- unsigned long now = jiffies, expect_rx_by;
+ ktime_t delay = ms_to_ktime(timo);
- expect_rx_by = now + timo;
- WRITE_ONCE(call->expect_rx_by, expect_rx_by);
- rxrpc_reduce_call_timer(call, expect_rx_by, now,
- rxrpc_timer_set_for_normal);
+ call->expect_rx_by = ktime_add(ktime_get_real(), delay);
+ trace_rxrpc_timer_set(call, delay, rxrpc_timer_trace_expect_rx);
}
switch (sp->hdr.type) {
diff --git a/net/rxrpc/insecure.c b/net/rxrpc/insecure.c
index 34353b6e584b..f2701068ed9e 100644
--- a/net/rxrpc/insecure.c
+++ b/net/rxrpc/insecure.c
@@ -15,14 +15,11 @@ static int none_init_connection_security(struct rxrpc_connection *conn,
}
/*
- * Work out how much data we can put in an unsecured packet.
+ * Allocate an appropriately sized buffer for the amount of data remaining.
*/
-static int none_how_much_data(struct rxrpc_call *call, size_t remain,
- size_t *_buf_size, size_t *_data_size, size_t *_offset)
+static struct rxrpc_txbuf *none_alloc_txbuf(struct rxrpc_call *call, size_t remain, gfp_t gfp)
{
- *_buf_size = *_data_size = min_t(size_t, remain, RXRPC_JUMBO_DATALEN);
- *_offset = 0;
- return 0;
+ return rxrpc_alloc_data_txbuf(call, min_t(size_t, remain, RXRPC_JUMBO_DATALEN), 0, gfp);
}
static int none_secure_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
@@ -79,7 +76,7 @@ const struct rxrpc_security rxrpc_no_security = {
.exit = none_exit,
.init_connection_security = none_init_connection_security,
.free_call_crypto = none_free_call_crypto,
- .how_much_data = none_how_much_data,
+ .alloc_txbuf = none_alloc_txbuf,
.secure_packet = none_secure_packet,
.verify_packet = none_verify_packet,
.respond_to_challenge = none_respond_to_challenge,
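The security ops table swaps the how_much_data() size query for an alloc_txbuf() hook that hands back a ready-sized buffer. A minimal userspace sketch of that contract, assuming a 1412-byte jumbo payload cap; the struct layout and names below are hypothetical stand-ins, not kernel API:

    /* Userspace analogue of ->alloc_txbuf(): return a buffer already
     * sized to min(remain, jumbo limit), with no header to skip for
     * the "none" security class.  Names are illustrative.
     */
    #include <stdio.h>
    #include <stdlib.h>

    #define JUMBO_DATALEN 1412            /* stand-in for RXRPC_JUMBO_DATALEN */

    struct txbuf { size_t space; size_t offset; unsigned char data[]; };

    static struct txbuf *none_alloc_txbuf(size_t remain)
    {
        size_t space = remain < JUMBO_DATALEN ? remain : JUMBO_DATALEN;
        struct txbuf *txb = malloc(sizeof(*txb) + space);

        if (!txb)
            return NULL;
        txb->space  = space;              /* payload room for the caller */
        txb->offset = 0;                  /* no security header */
        return txb;
    }

    int main(void)
    {
        struct txbuf *txb = none_alloc_txbuf(100000);

        if (!txb)
            return 1;
        printf("space=%zu offset=%zu\n", txb->space, txb->offset);
        free(txb);
        return 0;
    }

Returning the buffer directly lets each security class reserve its own header space and alignment in one step instead of reporting three sizes back to the sendmsg path.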
diff --git a/net/rxrpc/io_thread.c b/net/rxrpc/io_thread.c
index 4a3a08a0e2cd..0300baa9afcd 100644
--- a/net/rxrpc/io_thread.c
+++ b/net/rxrpc/io_thread.c
@@ -124,6 +124,7 @@ static bool rxrpc_extract_header(struct rxrpc_skb_priv *sp,
struct sk_buff *skb)
{
struct rxrpc_wire_header whdr;
+ struct rxrpc_ackpacket ack;
/* dig out the RxRPC connection details */
if (skb_copy_bits(skb, 0, &whdr, sizeof(whdr)) < 0)
@@ -141,6 +142,16 @@ static bool rxrpc_extract_header(struct rxrpc_skb_priv *sp,
sp->hdr.securityIndex = whdr.securityIndex;
sp->hdr._rsvd = ntohs(whdr._rsvd);
sp->hdr.serviceId = ntohs(whdr.serviceId);
+
+ if (sp->hdr.type == RXRPC_PACKET_TYPE_ACK) {
+ if (skb_copy_bits(skb, sizeof(whdr), &ack, sizeof(ack)) < 0)
+ return rxrpc_bad_message(skb, rxrpc_badmsg_short_ack);
+ sp->ack.first_ack = ntohl(ack.firstPacket);
+ sp->ack.prev_ack = ntohl(ack.previousPacket);
+ sp->ack.acked_serial = ntohl(ack.serial);
+ sp->ack.reason = ack.reason;
+ sp->ack.nr_acks = ack.nAcks;
+ }
return true;
}
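Pre-parsing the ACK header here means the fields ride along with the skb in host order, so rxrpc_input_ack() no longer needs its own skb_copy_bits() pass. A standalone sketch of that one-shot decode; the wire struct mirrors rxrpc_ackpacket and the sample values are made up:

    #include <stdint.h>
    #include <stdio.h>
    #include <arpa/inet.h>

    struct ackpacket_wire {               /* big-endian wire layout */
        uint16_t bufferSpace;
        uint16_t maxSkew;
        uint32_t firstPacket;
        uint32_t previousPacket;
        uint32_t serial;
        uint8_t  reason;
        uint8_t  nAcks;
    } __attribute__((packed));

    struct ack_priv {                     /* host-order copy, as in sp->ack */
        uint32_t first_ack, prev_ack, acked_serial;
        uint8_t  reason, nr_acks;
    };

    int main(void)
    {
        struct ackpacket_wire wire = {
            .firstPacket    = htonl(5),
            .previousPacket = htonl(9),
            .serial         = htonl(42),
            .reason         = 6,          /* sample reason code */
            .nAcks          = 4,
        };
        struct ack_priv sp = {
            .first_ack    = ntohl(wire.firstPacket),
            .prev_ack     = ntohl(wire.previousPacket),
            .acked_serial = ntohl(wire.serial),
            .reason       = wire.reason,
            .nr_acks      = wire.nAcks,
        };

        printf("first=%u prev=%u serial=%u nAcks=%u\n",
               (unsigned)sp.first_ack, (unsigned)sp.prev_ack,
               (unsigned)sp.acked_serial, (unsigned)sp.nr_acks);
        return 0;
    }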
diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c
index 34d307368135..504453c688d7 100644
--- a/net/rxrpc/local_object.c
+++ b/net/rxrpc/local_object.c
@@ -452,6 +452,9 @@ void rxrpc_destroy_local(struct rxrpc_local *local)
#endif
rxrpc_purge_queue(&local->rx_queue);
rxrpc_purge_client_connections(local);
+ if (local->tx_alloc.va)
+ __page_frag_cache_drain(virt_to_page(local->tx_alloc.va),
+ local->tx_alloc.pagecnt_bias);
}
/*
diff --git a/net/rxrpc/misc.c b/net/rxrpc/misc.c
index 825b81183046..657cf35089a6 100644
--- a/net/rxrpc/misc.c
+++ b/net/rxrpc/misc.c
@@ -17,22 +17,22 @@
unsigned int rxrpc_max_backlog __read_mostly = 10;
/*
- * How long to wait before scheduling an ACK with subtype DELAY (in jiffies).
+ * How long to wait before scheduling an ACK with subtype DELAY (in ms).
*
* We use this when we've received new data packets. If those packets aren't
* all consumed within this time we will send a DELAY ACK if an ACK was not
* requested to let the sender know it doesn't need to resend.
*/
-unsigned long rxrpc_soft_ack_delay = HZ;
+unsigned long rxrpc_soft_ack_delay = 1000;
/*
- * How long to wait before scheduling an ACK with subtype IDLE (in jiffies).
+ * How long to wait before scheduling an ACK with subtype IDLE (in ms).
*
* We use this when we've consumed some previously soft-ACK'd packets when
* further packets aren't immediately received to decide when to send an IDLE
 * ACK to let the other end know that it can free up its Tx buffer space.
*/
-unsigned long rxrpc_idle_ack_delay = HZ / 2;
+unsigned long rxrpc_idle_ack_delay = 500;
/*
* Receive window size in packets. This indicates the maximum number of
diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
index 4a292f860ae3..5ea9601efd05 100644
--- a/net/rxrpc/output.c
+++ b/net/rxrpc/output.c
@@ -48,12 +48,10 @@ static const char rxrpc_keepalive_string[] = "";
static void rxrpc_tx_backoff(struct rxrpc_call *call, int ret)
{
if (ret < 0) {
- u16 tx_backoff = READ_ONCE(call->tx_backoff);
-
- if (tx_backoff < HZ)
- WRITE_ONCE(call->tx_backoff, tx_backoff + 1);
+ if (call->tx_backoff < 1000)
+ call->tx_backoff += 100;
} else {
- WRITE_ONCE(call->tx_backoff, 0);
+ call->tx_backoff = 0;
}
}
@@ -65,84 +63,92 @@ static void rxrpc_tx_backoff(struct rxrpc_call *call, int ret)
* Receiving a response to the ping will prevent the ->expect_rx_by timer from
* expiring.
*/
-static void rxrpc_set_keepalive(struct rxrpc_call *call)
+static void rxrpc_set_keepalive(struct rxrpc_call *call, ktime_t now)
{
- unsigned long now = jiffies, keepalive_at = call->next_rx_timo / 6;
+ ktime_t delay = ms_to_ktime(READ_ONCE(call->next_rx_timo) / 6);
- keepalive_at += now;
- WRITE_ONCE(call->keepalive_at, keepalive_at);
- rxrpc_reduce_call_timer(call, keepalive_at, now,
- rxrpc_timer_set_for_keepalive);
+ call->keepalive_at = ktime_add(ktime_get_real(), delay);
+ trace_rxrpc_timer_set(call, delay, rxrpc_timer_trace_keepalive);
}
/*
* Fill out an ACK packet.
*/
-static size_t rxrpc_fill_out_ack(struct rxrpc_connection *conn,
- struct rxrpc_call *call,
- struct rxrpc_txbuf *txb,
- u16 *_rwind)
+static void rxrpc_fill_out_ack(struct rxrpc_call *call,
+ struct rxrpc_txbuf *txb,
+ u8 ack_reason,
+ rxrpc_serial_t serial)
{
- struct rxrpc_ackinfo ackinfo;
+ struct rxrpc_wire_header *whdr = txb->kvec[0].iov_base;
+ struct rxrpc_acktrailer *trailer = txb->kvec[2].iov_base + 3;
+ struct rxrpc_ackpacket *ack = (struct rxrpc_ackpacket *)(whdr + 1);
unsigned int qsize, sack, wrap, to;
rxrpc_seq_t window, wtop;
int rsize;
u32 mtu, jmax;
- u8 *ackp = txb->acks;
+ u8 *filler = txb->kvec[2].iov_base;
+ u8 *sackp = txb->kvec[1].iov_base;
- call->ackr_nr_unacked = 0;
- atomic_set(&call->ackr_nr_consumed, 0);
rxrpc_inc_stat(call->rxnet, stat_tx_ack_fill);
- clear_bit(RXRPC_CALL_RX_IS_IDLE, &call->flags);
window = call->ackr_window;
wtop = call->ackr_wtop;
sack = call->ackr_sack_base % RXRPC_SACK_SIZE;
- txb->ack.firstPacket = htonl(window);
- txb->ack.nAcks = wtop - window;
+
+ whdr->seq = 0;
+ whdr->type = RXRPC_PACKET_TYPE_ACK;
+ txb->flags |= RXRPC_SLOW_START_OK;
+ ack->bufferSpace = 0;
+ ack->maxSkew = 0;
+ ack->firstPacket = htonl(window);
+ ack->previousPacket = htonl(call->rx_highest_seq);
+ ack->serial = htonl(serial);
+ ack->reason = ack_reason;
+ ack->nAcks = wtop - window;
+ filler[0] = 0;
+ filler[1] = 0;
+ filler[2] = 0;
+
+ if (ack_reason == RXRPC_ACK_PING)
+ txb->flags |= RXRPC_REQUEST_ACK;
if (after(wtop, window)) {
+ txb->len += ack->nAcks;
+ txb->kvec[1].iov_base = sackp;
+ txb->kvec[1].iov_len = ack->nAcks;
+
wrap = RXRPC_SACK_SIZE - sack;
- to = min_t(unsigned int, txb->ack.nAcks, RXRPC_SACK_SIZE);
+ to = min_t(unsigned int, ack->nAcks, RXRPC_SACK_SIZE);
- if (sack + txb->ack.nAcks <= RXRPC_SACK_SIZE) {
- memcpy(txb->acks, call->ackr_sack_table + sack, txb->ack.nAcks);
+ if (sack + ack->nAcks <= RXRPC_SACK_SIZE) {
+ memcpy(sackp, call->ackr_sack_table + sack, ack->nAcks);
} else {
- memcpy(txb->acks, call->ackr_sack_table + sack, wrap);
- memcpy(txb->acks + wrap, call->ackr_sack_table,
- to - wrap);
+ memcpy(sackp, call->ackr_sack_table + sack, wrap);
+ memcpy(sackp + wrap, call->ackr_sack_table, to - wrap);
}
-
- ackp += to;
} else if (before(wtop, window)) {
pr_warn("ack window backward %x %x", window, wtop);
- } else if (txb->ack.reason == RXRPC_ACK_DELAY) {
- txb->ack.reason = RXRPC_ACK_IDLE;
+ } else if (ack->reason == RXRPC_ACK_DELAY) {
+ ack->reason = RXRPC_ACK_IDLE;
}
- mtu = conn->peer->if_mtu;
- mtu -= conn->peer->hdrsize;
+ mtu = call->peer->if_mtu;
+ mtu -= call->peer->hdrsize;
jmax = rxrpc_rx_jumbo_max;
qsize = (window - 1) - call->rx_consumed;
rsize = max_t(int, call->rx_winsize - qsize, 0);
- *_rwind = rsize;
- ackinfo.rxMTU = htonl(rxrpc_rx_mtu);
- ackinfo.maxMTU = htonl(mtu);
- ackinfo.rwind = htonl(rsize);
- ackinfo.jumbo_max = htonl(jmax);
-
- *ackp++ = 0;
- *ackp++ = 0;
- *ackp++ = 0;
- memcpy(ackp, &ackinfo, sizeof(ackinfo));
- return txb->ack.nAcks + 3 + sizeof(ackinfo);
+ txb->ack_rwind = rsize;
+ trailer->maxMTU = htonl(rxrpc_rx_mtu);
+ trailer->ifMTU = htonl(mtu);
+ trailer->rwind = htonl(rsize);
+ trailer->jumbo_max = htonl(jmax);
}
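rxrpc_fill_out_ack() now writes into three separate segments instead of one flat txb->data area: wire header plus ACK body, the soft-ACK table, and three filler bytes followed by the trailer. A userspace sketch of that gather layout, with struct iovec standing in for the kernel kvec and illustrative sizes:

    #include <stdio.h>
    #include <sys/uio.h>

    int main(void)
    {
        unsigned char hdr_ack[28 + 18];       /* wire header + ackpacket (sizes illustrative) */
        unsigned char sacks[255];             /* soft-ACK table, up to 255 entries */
        unsigned char fill_trailer[3 + 16];   /* 3 pad bytes + acktrailer */
        struct iovec kvec[3] = {
            { hdr_ack,      sizeof(hdr_ack) },
            { sacks,        10 },             /* only nAcks bytes go on the wire */
            { fill_trailer, sizeof(fill_trailer) },
        };
        size_t len = 0;

        for (int i = 0; i < 3; i++)
            len += kvec[i].iov_len;
        printf("ACK datagram: %zu bytes in 3 segments\n", len);
        return 0;
    }

With MSG_SPLICE_PAGES the three segments are spliced straight into the UDP datagram, so the variable-length sack table never has to be moved to butt up against the trailer.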
/*
* Record the beginning of an RTT probe.
*/
-static int rxrpc_begin_rtt_probe(struct rxrpc_call *call, rxrpc_serial_t serial,
- enum rxrpc_rtt_tx_trace why)
+static void rxrpc_begin_rtt_probe(struct rxrpc_call *call, rxrpc_serial_t serial,
+ ktime_t now, enum rxrpc_rtt_tx_trace why)
{
unsigned long avail = call->rtt_avail;
int rtt_slot = 9;
@@ -155,47 +161,31 @@ static int rxrpc_begin_rtt_probe(struct rxrpc_call *call, rxrpc_serial_t serial,
goto no_slot;
call->rtt_serial[rtt_slot] = serial;
- call->rtt_sent_at[rtt_slot] = ktime_get_real();
+ call->rtt_sent_at[rtt_slot] = now;
smp_wmb(); /* Write data before avail bit */
set_bit(rtt_slot + RXRPC_CALL_RTT_PEND_SHIFT, &call->rtt_avail);
trace_rxrpc_rtt_tx(call, why, rtt_slot, serial);
- return rtt_slot;
+ return;
no_slot:
trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_no_slot, rtt_slot, serial);
- return -1;
-}
-
-/*
- * Cancel an RTT probe.
- */
-static void rxrpc_cancel_rtt_probe(struct rxrpc_call *call,
- rxrpc_serial_t serial, int rtt_slot)
-{
- if (rtt_slot != -1) {
- clear_bit(rtt_slot + RXRPC_CALL_RTT_PEND_SHIFT, &call->rtt_avail);
- smp_wmb(); /* Clear pending bit before setting slot */
- set_bit(rtt_slot, &call->rtt_avail);
- trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_cancel, rtt_slot, serial);
- }
}
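Since the probe is now only started once the send has actually succeeded (and the serial lives in the txbuf), the cancel-on-failure path can be dropped entirely. A sketch of the slot-claim bookkeeping that remains; the bit layout is illustrative, not the kernel's exact constants:

    #include <stdint.h>
    #include <stdio.h>

    #define RTT_SLOTS  4
    #define PEND_SHIFT 8                  /* pending bits above avail bits */

    static unsigned long rtt_avail = (1UL << RTT_SLOTS) - 1;
    static uint32_t rtt_serial[RTT_SLOTS];

    static int begin_rtt_probe(uint32_t serial)
    {
        for (int slot = 0; slot < RTT_SLOTS; slot++) {
            if (rtt_avail & (1UL << slot)) {
                rtt_avail &= ~(1UL << slot);             /* claim the slot */
                rtt_serial[slot] = serial;
                rtt_avail |= 1UL << (slot + PEND_SHIFT); /* arm it */
                return slot;
            }
        }
        return -1;                                       /* no slot free */
    }

    int main(void)
    {
        printf("slot=%d slot=%d\n", begin_rtt_probe(1234), begin_rtt_probe(1235));
        return 0;
    }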
/*
* Transmit an ACK packet.
*/
-int rxrpc_send_ack_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
+static void rxrpc_send_ack_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
{
+ struct rxrpc_wire_header *whdr = txb->kvec[0].iov_base;
struct rxrpc_connection *conn;
+ struct rxrpc_ackpacket *ack = (struct rxrpc_ackpacket *)(whdr + 1);
struct msghdr msg;
- struct kvec iov[1];
- rxrpc_serial_t serial;
- size_t len, n;
- int ret, rtt_slot = -1;
- u16 rwind;
+ ktime_t now;
+ int ret;
if (test_bit(RXRPC_CALL_DISCONNECTED, &call->flags))
- return -ECONNRESET;
+ return;
conn = call->conn;
@@ -203,55 +193,68 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
msg.msg_namelen = call->peer->srx.transport_len;
msg.msg_control = NULL;
msg.msg_controllen = 0;
- msg.msg_flags = 0;
-
- if (txb->ack.reason == RXRPC_ACK_PING)
- txb->wire.flags |= RXRPC_REQUEST_ACK;
-
- n = rxrpc_fill_out_ack(conn, call, txb, &rwind);
- if (n == 0)
- return 0;
-
- iov[0].iov_base = &txb->wire;
- iov[0].iov_len = sizeof(txb->wire) + sizeof(txb->ack) + n;
- len = iov[0].iov_len;
+ msg.msg_flags = MSG_SPLICE_PAGES;
- serial = rxrpc_get_next_serial(conn);
- txb->wire.serial = htonl(serial);
- trace_rxrpc_tx_ack(call->debug_id, serial,
- ntohl(txb->ack.firstPacket),
- ntohl(txb->ack.serial), txb->ack.reason, txb->ack.nAcks,
- rwind);
+ whdr->flags = txb->flags & RXRPC_TXBUF_WIRE_FLAGS;
- if (txb->ack.reason == RXRPC_ACK_PING)
- rtt_slot = rxrpc_begin_rtt_probe(call, serial, rxrpc_rtt_tx_ping);
+ txb->serial = rxrpc_get_next_serial(conn);
+ whdr->serial = htonl(txb->serial);
+ trace_rxrpc_tx_ack(call->debug_id, txb->serial,
+ ntohl(ack->firstPacket),
+ ntohl(ack->serial), ack->reason, ack->nAcks,
+ txb->ack_rwind);
rxrpc_inc_stat(call->rxnet, stat_tx_ack_send);
- /* Grab the highest received seq as late as possible */
- txb->ack.previousPacket = htonl(call->rx_highest_seq);
-
- iov_iter_kvec(&msg.msg_iter, WRITE, iov, 1, len);
- ret = do_udp_sendmsg(conn->local->socket, &msg, len);
+ iov_iter_kvec(&msg.msg_iter, WRITE, txb->kvec, txb->nr_kvec, txb->len);
+ rxrpc_local_dont_fragment(conn->local, false);
+ ret = do_udp_sendmsg(conn->local->socket, &msg, txb->len);
call->peer->last_tx_at = ktime_get_seconds();
if (ret < 0) {
- trace_rxrpc_tx_fail(call->debug_id, serial, ret,
+ trace_rxrpc_tx_fail(call->debug_id, txb->serial, ret,
rxrpc_tx_point_call_ack);
} else {
- trace_rxrpc_tx_packet(call->debug_id, &txb->wire,
+ trace_rxrpc_tx_packet(call->debug_id, whdr,
rxrpc_tx_point_call_ack);
- if (txb->wire.flags & RXRPC_REQUEST_ACK)
- call->peer->rtt_last_req = ktime_get_real();
+ now = ktime_get_real();
+ if (ack->reason == RXRPC_ACK_PING)
+ rxrpc_begin_rtt_probe(call, txb->serial, now, rxrpc_rtt_tx_ping);
+ if (txb->flags & RXRPC_REQUEST_ACK)
+ call->peer->rtt_last_req = now;
+ rxrpc_set_keepalive(call, now);
}
rxrpc_tx_backoff(call, ret);
+}
- if (!__rxrpc_call_is_complete(call)) {
- if (ret < 0)
- rxrpc_cancel_rtt_probe(call, serial, rtt_slot);
- rxrpc_set_keepalive(call);
+/*
+ * Queue an ACK for immediate transmission.
+ */
+void rxrpc_send_ACK(struct rxrpc_call *call, u8 ack_reason,
+ rxrpc_serial_t serial, enum rxrpc_propose_ack_trace why)
+{
+ struct rxrpc_txbuf *txb;
+
+ if (test_bit(RXRPC_CALL_DISCONNECTED, &call->flags))
+ return;
+
+ rxrpc_inc_stat(call->rxnet, stat_tx_acks[ack_reason]);
+
+ txb = rxrpc_alloc_ack_txbuf(call, call->ackr_wtop - call->ackr_window);
+ if (!txb) {
+ kleave(" = -ENOMEM");
+ return;
}
- return ret;
+ txb->ack_why = why;
+
+ rxrpc_fill_out_ack(call, txb, ack_reason, serial);
+ call->ackr_nr_unacked = 0;
+ atomic_set(&call->ackr_nr_consumed, 0);
+ clear_bit(RXRPC_CALL_RX_IS_IDLE, &call->flags);
+
+ trace_rxrpc_send_ack(call, why, ack_reason, serial);
+ rxrpc_send_ack_packet(call, txb);
+ rxrpc_put_txbuf(txb, rxrpc_txbuf_put_ack_tx);
}
/*
@@ -319,38 +322,22 @@ int rxrpc_send_abort_packet(struct rxrpc_call *call)
}
/*
- * send a packet through the transport endpoint
+ * Prepare a (sub)packet for transmission.
*/
-int rxrpc_send_data_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
+static void rxrpc_prepare_data_subpacket(struct rxrpc_call *call, struct rxrpc_txbuf *txb,
+ rxrpc_serial_t serial)
{
+ struct rxrpc_wire_header *whdr = txb->kvec[0].iov_base;
enum rxrpc_req_ack_trace why;
struct rxrpc_connection *conn = call->conn;
- struct msghdr msg;
- struct kvec iov[1];
- rxrpc_serial_t serial;
- size_t len;
- int ret, rtt_slot = -1;
_enter("%x,{%d}", txb->seq, txb->len);
- /* Each transmission of a Tx packet needs a new serial number */
- serial = rxrpc_get_next_serial(conn);
- txb->wire.serial = htonl(serial);
+ txb->serial = serial;
if (test_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags) &&
txb->seq == 1)
- txb->wire.userStatus = RXRPC_USERSTATUS_SERVICE_UPGRADE;
-
- iov[0].iov_base = &txb->wire;
- iov[0].iov_len = sizeof(txb->wire) + txb->len;
- len = iov[0].iov_len;
- iov_iter_kvec(&msg.msg_iter, WRITE, iov, 1, len);
-
- msg.msg_name = &call->peer->srx.transport;
- msg.msg_namelen = call->peer->srx.transport_len;
- msg.msg_control = NULL;
- msg.msg_controllen = 0;
- msg.msg_flags = 0;
+ whdr->userStatus = RXRPC_USERSTATUS_SERVICE_UPGRADE;
/* If our RTT cache needs working on, request an ACK. Also request
* ACKs if a DATA packet appears to have been lost.
@@ -359,13 +346,13 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
* service call, lest OpenAFS incorrectly send us an ACK with some
* soft-ACKs in it and then never follow up with a proper hard ACK.
*/
- if (txb->wire.flags & RXRPC_REQUEST_ACK)
+ if (txb->flags & RXRPC_REQUEST_ACK)
why = rxrpc_reqack_already_on;
- else if (test_bit(RXRPC_TXBUF_LAST, &txb->flags) && rxrpc_sending_to_client(txb))
+ else if ((txb->flags & RXRPC_LAST_PACKET) && rxrpc_sending_to_client(txb))
why = rxrpc_reqack_no_srv_last;
else if (test_and_clear_bit(RXRPC_CALL_EV_ACK_LOST, &call->events))
why = rxrpc_reqack_ack_lost;
- else if (test_bit(RXRPC_TXBUF_RESENT, &txb->flags))
+ else if (txb->flags & RXRPC_TXBUF_RESENT)
why = rxrpc_reqack_retrans;
else if (call->cong_mode == RXRPC_CALL_SLOW_START && call->cong_cwnd <= 2)
why = rxrpc_reqack_slow_start;
@@ -381,42 +368,116 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
rxrpc_inc_stat(call->rxnet, stat_why_req_ack[why]);
trace_rxrpc_req_ack(call->debug_id, txb->seq, why);
if (why != rxrpc_reqack_no_srv_last)
- txb->wire.flags |= RXRPC_REQUEST_ACK;
+ txb->flags |= RXRPC_REQUEST_ACK;
dont_set_request_ack:
+ whdr->flags = txb->flags & RXRPC_TXBUF_WIRE_FLAGS;
+ whdr->serial = htonl(txb->serial);
+ whdr->cksum = txb->cksum;
+
+ trace_rxrpc_tx_data(call, txb->seq, txb->serial, txb->flags, false);
+}
+
+/*
+ * Prepare a packet for transmission.
+ */
+static size_t rxrpc_prepare_data_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
+{
+ rxrpc_serial_t serial;
+
+ /* Each transmission of a Tx packet needs a new serial number */
+ serial = rxrpc_get_next_serial(call->conn);
+
+ rxrpc_prepare_data_subpacket(call, txb, serial);
+
+ return txb->len;
+}
+
+/*
+ * Set timeouts after transmitting a packet.
+ */
+static void rxrpc_tstamp_data_packets(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
+{
+ ktime_t now = ktime_get_real();
+ bool ack_requested = txb->flags & RXRPC_REQUEST_ACK;
+
+ call->tx_last_sent = now;
+ txb->last_sent = now;
+
+ if (ack_requested) {
+ rxrpc_begin_rtt_probe(call, txb->serial, now, rxrpc_rtt_tx_data);
+
+ call->peer->rtt_last_req = now;
+ if (call->peer->rtt_count > 1) {
+ ktime_t delay = rxrpc_get_rto_backoff(call->peer, false);
+
+ call->ack_lost_at = ktime_add(now, delay);
+ trace_rxrpc_timer_set(call, delay, rxrpc_timer_trace_lost_ack);
+ }
+ }
+
+ if (!test_and_set_bit(RXRPC_CALL_BEGAN_RX_TIMER, &call->flags)) {
+ ktime_t delay = ms_to_ktime(READ_ONCE(call->next_rx_timo));
+
+ call->expect_rx_by = ktime_add(now, delay);
+ trace_rxrpc_timer_set(call, delay, rxrpc_timer_trace_expect_rx);
+ }
+
+ rxrpc_set_keepalive(call, now);
+}
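Every timer in this path becomes an absolute ktime_t deadline computed as now + delay, replacing jiffies arithmetic and the reduce-timer helper. A runnable analogue of the deadline computation, with nanosecond timestamps standing in for ktime_t:

    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    typedef int64_t ktime_ns;

    static ktime_ns ktime_get_real(void)
    {
        struct timespec ts;

        clock_gettime(CLOCK_REALTIME, &ts);
        return (ktime_ns)ts.tv_sec * 1000000000 + ts.tv_nsec;
    }

    static ktime_ns ms_to_ktime(unsigned int ms)
    {
        return (ktime_ns)ms * 1000000;
    }

    int main(void)
    {
        unsigned int next_rx_timo = 2000;   /* ms, per-call setting */
        ktime_ns expect_rx_by = ktime_get_real() + ms_to_ktime(next_rx_timo);

        printf("deadline: %lld ns since the epoch\n", (long long)expect_rx_by);
        return 0;
    }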
+
+/*
+ * send a packet through the transport endpoint
+ */
+static int rxrpc_send_data_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
+{
+ struct rxrpc_wire_header *whdr = txb->kvec[0].iov_base;
+ struct rxrpc_connection *conn = call->conn;
+ enum rxrpc_tx_point frag;
+ struct msghdr msg;
+ size_t len;
+ int ret;
+
+ _enter("%x,{%d}", txb->seq, txb->len);
+
+ len = rxrpc_prepare_data_packet(call, txb);
+
if (IS_ENABLED(CONFIG_AF_RXRPC_INJECT_LOSS)) {
static int lose;
if ((lose++ & 7) == 7) {
ret = 0;
- trace_rxrpc_tx_data(call, txb->seq, serial,
- txb->wire.flags,
- test_bit(RXRPC_TXBUF_RESENT, &txb->flags),
- true);
+ trace_rxrpc_tx_data(call, txb->seq, txb->serial,
+ txb->flags, true);
goto done;
}
}
- trace_rxrpc_tx_data(call, txb->seq, serial, txb->wire.flags,
- test_bit(RXRPC_TXBUF_RESENT, &txb->flags), false);
+ iov_iter_kvec(&msg.msg_iter, WRITE, txb->kvec, txb->nr_kvec, len);
+
+ msg.msg_name = &call->peer->srx.transport;
+ msg.msg_namelen = call->peer->srx.transport_len;
+ msg.msg_control = NULL;
+ msg.msg_controllen = 0;
+ msg.msg_flags = MSG_SPLICE_PAGES;
/* Track what we've attempted to transmit at least once so that the
* retransmission algorithm doesn't try to resend what we haven't sent
- * yet. However, this can race as we can receive an ACK before we get
- * to this point. But, OTOH, if we won't get an ACK mentioning this
- * packet unless the far side received it (though it could have
- * discarded it anyway and NAK'd it).
+ * yet.
*/
- cmpxchg(&call->tx_transmitted, txb->seq - 1, txb->seq);
+ if (txb->seq == call->tx_transmitted + 1)
+ call->tx_transmitted = txb->seq;
/* send the packet with the don't fragment bit set if we currently
* think it's small enough */
- if (txb->len >= call->peer->maxdata)
- goto send_fragmentable;
-
- txb->last_sent = ktime_get_real();
- if (txb->wire.flags & RXRPC_REQUEST_ACK)
- rtt_slot = rxrpc_begin_rtt_probe(call, serial, rxrpc_rtt_tx_data);
+ if (txb->len >= call->peer->maxdata) {
+ rxrpc_local_dont_fragment(conn->local, false);
+ frag = rxrpc_tx_point_call_data_frag;
+ } else {
+ rxrpc_local_dont_fragment(conn->local, true);
+ frag = rxrpc_tx_point_call_data_nofrag;
+ }
+retry:
/* send the packet by UDP
* - returns -EMSGSIZE if UDP would have to fragment the packet
* to go out of the interface
@@ -429,46 +490,21 @@ dont_set_request_ack:
if (ret < 0) {
rxrpc_inc_stat(call->rxnet, stat_tx_data_send_fail);
- rxrpc_cancel_rtt_probe(call, serial, rtt_slot);
- trace_rxrpc_tx_fail(call->debug_id, serial, ret,
- rxrpc_tx_point_call_data_nofrag);
+ trace_rxrpc_tx_fail(call->debug_id, txb->serial, ret, frag);
} else {
- trace_rxrpc_tx_packet(call->debug_id, &txb->wire,
- rxrpc_tx_point_call_data_nofrag);
+ trace_rxrpc_tx_packet(call->debug_id, whdr, frag);
}
rxrpc_tx_backoff(call, ret);
- if (ret == -EMSGSIZE)
- goto send_fragmentable;
+ if (ret == -EMSGSIZE && frag == rxrpc_tx_point_call_data_nofrag) {
+ rxrpc_local_dont_fragment(conn->local, false);
+ frag = rxrpc_tx_point_call_data_frag;
+ goto retry;
+ }
done:
if (ret >= 0) {
- call->tx_last_sent = txb->last_sent;
- if (txb->wire.flags & RXRPC_REQUEST_ACK) {
- call->peer->rtt_last_req = txb->last_sent;
- if (call->peer->rtt_count > 1) {
- unsigned long nowj = jiffies, ack_lost_at;
-
- ack_lost_at = rxrpc_get_rto_backoff(call->peer, false);
- ack_lost_at += nowj;
- WRITE_ONCE(call->ack_lost_at, ack_lost_at);
- rxrpc_reduce_call_timer(call, ack_lost_at, nowj,
- rxrpc_timer_set_for_lost_ack);
- }
- }
-
- if (txb->seq == 1 &&
- !test_and_set_bit(RXRPC_CALL_BEGAN_RX_TIMER,
- &call->flags)) {
- unsigned long nowj = jiffies, expect_rx_by;
-
- expect_rx_by = nowj + call->next_rx_timo;
- WRITE_ONCE(call->expect_rx_by, expect_rx_by);
- rxrpc_reduce_call_timer(call, expect_rx_by, nowj,
- rxrpc_timer_set_for_normal);
- }
-
- rxrpc_set_keepalive(call);
+ rxrpc_tstamp_data_packets(call, txb);
} else {
/* Cancel the call if the initial transmission fails,
* particularly if that's due to network routing issues that
@@ -482,41 +518,6 @@ done:
_leave(" = %d [%u]", ret, call->peer->maxdata);
return ret;
-
-send_fragmentable:
- /* attempt to send this message with fragmentation enabled */
- _debug("send fragment");
-
- txb->last_sent = ktime_get_real();
- if (txb->wire.flags & RXRPC_REQUEST_ACK)
- rtt_slot = rxrpc_begin_rtt_probe(call, serial, rxrpc_rtt_tx_data);
-
- switch (conn->local->srx.transport.family) {
- case AF_INET6:
- case AF_INET:
- rxrpc_local_dont_fragment(conn->local, false);
- rxrpc_inc_stat(call->rxnet, stat_tx_data_send_frag);
- ret = do_udp_sendmsg(conn->local->socket, &msg, len);
- conn->peer->last_tx_at = ktime_get_seconds();
-
- rxrpc_local_dont_fragment(conn->local, true);
- break;
-
- default:
- BUG();
- }
-
- if (ret < 0) {
- rxrpc_inc_stat(call->rxnet, stat_tx_data_send_fail);
- rxrpc_cancel_rtt_probe(call, serial, rtt_slot);
- trace_rxrpc_tx_fail(call->debug_id, serial, ret,
- rxrpc_tx_point_call_data_frag);
- } else {
- trace_rxrpc_tx_packet(call->debug_id, &txb->wire,
- rxrpc_tx_point_call_data_frag);
- }
- rxrpc_tx_backoff(call, ret);
- goto done;
}
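Folding send_fragmentable into the main path leaves one send site: DF is set up front when the packet should fit in the MTU, and a single -EMSGSIZE retry clears it. A control-flow sketch under that assumption; send_udp() is a stand-in, not a kernel API:

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    static int send_udp(bool df, int len, int mtu)
    {
        if (df && len > mtu)
            return -EMSGSIZE;             /* stack refuses to fragment */
        return len;
    }

    int main(void)
    {
        int len = 1600, mtu = 1500;
        bool df = len < mtu;              /* only set DF if it should fit */
        int ret;

    retry:
        ret = send_udp(df, len, mtu);
        if (ret == -EMSGSIZE && df) {
            df = false;                   /* retry with fragmentation allowed */
            goto retry;
        }
        printf("sent %d bytes (df=%d)\n", ret, df);
        return 0;
    }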
/*
@@ -723,11 +724,9 @@ void rxrpc_transmit_one(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
rxrpc_instant_resend(call, txb);
}
} else {
- unsigned long now = jiffies;
- unsigned long resend_at = now + call->peer->rto_j;
+ ktime_t delay = ns_to_ktime(call->peer->rto_us * NSEC_PER_USEC);
- WRITE_ONCE(call->resend_at, resend_at);
- rxrpc_reduce_call_timer(call, resend_at, now,
- rxrpc_timer_set_for_send);
+ call->resend_at = ktime_add(ktime_get_real(), delay);
+ trace_rxrpc_timer_set(call, delay, rxrpc_timer_trace_resend_tx);
}
}
diff --git a/net/rxrpc/proc.c b/net/rxrpc/proc.c
index 26dc2f26d92d..263a2251e3d2 100644
--- a/net/rxrpc/proc.c
+++ b/net/rxrpc/proc.c
@@ -52,9 +52,9 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v)
struct rxrpc_call *call;
struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
enum rxrpc_call_state state;
- unsigned long timeout = 0;
rxrpc_seq_t acks_hard_ack;
char lbuff[50], rbuff[50];
+ long timeout = 0;
if (v == &rxnet->calls) {
seq_puts(seq,
@@ -76,10 +76,8 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v)
sprintf(rbuff, "%pISpc", &call->dest_srx.transport);
state = rxrpc_call_state(call);
- if (state != RXRPC_CALL_SERVER_PREALLOC) {
- timeout = READ_ONCE(call->expect_rx_by);
- timeout -= jiffies;
- }
+ if (state != RXRPC_CALL_SERVER_PREALLOC)
+ timeout = ktime_ms_delta(READ_ONCE(call->expect_rx_by), ktime_get_real());
acks_hard_ack = READ_ONCE(call->acks_hard_ack);
seq_printf(seq,
@@ -309,7 +307,7 @@ static int rxrpc_peer_seq_show(struct seq_file *seq, void *v)
peer->mtu,
now - peer->last_tx_at,
peer->srtt_us >> 3,
- jiffies_to_usecs(peer->rto_j));
+ peer->rto_us);
return 0;
}
diff --git a/net/rxrpc/protocol.h b/net/rxrpc/protocol.h
index e8ee4af43ca8..4fe6b4d20ada 100644
--- a/net/rxrpc/protocol.h
+++ b/net/rxrpc/protocol.h
@@ -135,9 +135,9 @@ struct rxrpc_ackpacket {
/*
* ACK packets can have a further piece of information tagged on the end
*/
-struct rxrpc_ackinfo {
- __be32 rxMTU; /* maximum Rx MTU size (bytes) [AFS 3.3] */
- __be32 maxMTU; /* maximum interface MTU size (bytes) [AFS 3.3] */
+struct rxrpc_acktrailer {
+ __be32 maxMTU; /* maximum Rx MTU size (bytes) [AFS 3.3] */
+ __be32 ifMTU; /* maximum interface MTU size (bytes) [AFS 3.3] */
__be32 rwind; /* Rx window size (packets) [AFS 3.4] */
__be32 jumbo_max; /* max packets to stick into a jumbo packet [AFS 3.5] */
};
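Only the struct and field names change here; the wire format is untouched. A quick host-order decode showing how the renamed fields are consumed (sample values invented; the min() mirrors rxrpc_input_ack_trailer() above):

    #include <stdint.h>
    #include <stdio.h>
    #include <arpa/inet.h>

    struct acktrailer_wire {
        uint32_t maxMTU, ifMTU, rwind, jumbo_max;   /* all big-endian */
    };

    int main(void)
    {
        struct acktrailer_wire t = {
            .maxMTU = htonl(1444), .ifMTU = htonl(1500),
            .rwind  = htonl(255),  .jumbo_max = htonl(4),
        };
        uint32_t max = ntohl(t.maxMTU), ifm = ntohl(t.ifMTU);
        uint32_t mtu = max < ifm ? max : ifm;

        printf("effective mtu=%u rwind=%u\n",
               (unsigned)mtu, (unsigned)ntohl(t.rwind));
        return 0;
    }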
diff --git a/net/rxrpc/rtt.c b/net/rxrpc/rtt.c
index be61d6f5be8d..cdab7b7d08a0 100644
--- a/net/rxrpc/rtt.c
+++ b/net/rxrpc/rtt.c
@@ -11,8 +11,8 @@
#include <linux/net.h>
#include "ar-internal.h"
-#define RXRPC_RTO_MAX ((unsigned)(120 * HZ))
-#define RXRPC_TIMEOUT_INIT ((unsigned)(1*HZ)) /* RFC6298 2.1 initial RTO value */
+#define RXRPC_RTO_MAX (120 * USEC_PER_SEC)
+#define RXRPC_TIMEOUT_INIT ((unsigned int)(1 * MSEC_PER_SEC)) /* RFC6298 2.1 initial RTO value */
#define rxrpc_jiffies32 ((u32)jiffies) /* As rxrpc_jiffies32 */
static u32 rxrpc_rto_min_us(struct rxrpc_peer *peer)
@@ -22,7 +22,7 @@ static u32 rxrpc_rto_min_us(struct rxrpc_peer *peer)
static u32 __rxrpc_set_rto(const struct rxrpc_peer *peer)
{
- return usecs_to_jiffies((peer->srtt_us >> 3) + peer->rttvar_us);
+ return (peer->srtt_us >> 3) + peer->rttvar_us;
}
static u32 rxrpc_bound_rto(u32 rto)
@@ -124,7 +124,7 @@ static void rxrpc_set_rto(struct rxrpc_peer *peer)
/* NOTE: clamping at RXRPC_RTO_MIN is not required, current algo
* guarantees that rto is higher.
*/
- peer->rto_j = rxrpc_bound_rto(rto);
+ peer->rto_us = rxrpc_bound_rto(rto);
}
static void rxrpc_ack_update_rtt(struct rxrpc_peer *peer, long rtt_us)
@@ -163,33 +163,33 @@ void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why,
spin_unlock(&peer->rtt_input_lock);
trace_rxrpc_rtt_rx(call, why, rtt_slot, send_serial, resp_serial,
- peer->srtt_us >> 3, peer->rto_j);
+ peer->srtt_us >> 3, peer->rto_us);
}
/*
- * Get the retransmission timeout to set in jiffies, backing it off each time
- * we retransmit.
+ * Get the retransmission timeout to set in nanoseconds, backing it off each
+ * time we retransmit.
*/
-unsigned long rxrpc_get_rto_backoff(struct rxrpc_peer *peer, bool retrans)
+ktime_t rxrpc_get_rto_backoff(struct rxrpc_peer *peer, bool retrans)
{
- u64 timo_j;
- u8 backoff = READ_ONCE(peer->backoff);
+ u64 timo_us;
+ u32 backoff = READ_ONCE(peer->backoff);
- timo_j = peer->rto_j;
- timo_j <<= backoff;
- if (retrans && timo_j * 2 <= RXRPC_RTO_MAX)
+ timo_us = peer->rto_us;
+ timo_us <<= backoff;
+ if (retrans && timo_us * 2 <= RXRPC_RTO_MAX)
WRITE_ONCE(peer->backoff, backoff + 1);
- if (timo_j < 1)
- timo_j = 1;
+ if (timo_us < 1)
+ timo_us = 1;
- return timo_j;
+ return ns_to_ktime(timo_us * NSEC_PER_USEC);
}
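With the RTO held in microseconds end to end, the backoff becomes a plain shift clamped at RXRPC_RTO_MAX. A runnable sketch of that doubling, assuming a 250ms base RTO:

    #include <stdint.h>
    #include <stdio.h>

    #define USEC_PER_SEC 1000000ULL
    #define RTO_MAX      (120 * USEC_PER_SEC)

    int main(void)
    {
        uint64_t rto_us = 250000;         /* 250ms base RTO (assumed) */
        unsigned int backoff = 0;

        for (int retrans = 0; retrans < 12; retrans++) {
            uint64_t timo_us = rto_us << backoff;

            if (timo_us * 2 <= RTO_MAX)   /* only back off while it fits */
                backoff++;
            printf("retrans %2d: timeout %llu us\n",
                   retrans, (unsigned long long)timo_us);
        }
        return 0;
    }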
void rxrpc_peer_init_rtt(struct rxrpc_peer *peer)
{
- peer->rto_j = RXRPC_TIMEOUT_INIT;
- peer->mdev_us = jiffies_to_usecs(RXRPC_TIMEOUT_INIT);
+ peer->rto_us = RXRPC_TIMEOUT_INIT;
+ peer->mdev_us = RXRPC_TIMEOUT_INIT;
peer->backoff = 0;
//minmax_reset(&peer->rtt_min, rxrpc_jiffies32, ~0U);
}
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
index 6b32d61d4cdc..f1a68270862d 100644
--- a/net/rxrpc/rxkad.c
+++ b/net/rxrpc/rxkad.c
@@ -145,16 +145,17 @@ error:
/*
* Work out how much data we can put in a packet.
*/
-static int rxkad_how_much_data(struct rxrpc_call *call, size_t remain,
- size_t *_buf_size, size_t *_data_size, size_t *_offset)
+static struct rxrpc_txbuf *rxkad_alloc_txbuf(struct rxrpc_call *call, size_t remain, gfp_t gfp)
{
- size_t shdr, buf_size, chunk;
+ struct rxrpc_txbuf *txb;
+ size_t shdr, space;
+
+ remain = min(remain, 65535 - sizeof(struct rxrpc_wire_header));
switch (call->conn->security_level) {
default:
- buf_size = chunk = min_t(size_t, remain, RXRPC_JUMBO_DATALEN);
- shdr = 0;
- goto out;
+ space = min_t(size_t, remain, RXRPC_JUMBO_DATALEN);
+ return rxrpc_alloc_data_txbuf(call, space, 0, gfp);
case RXRPC_SECURITY_AUTH:
shdr = sizeof(struct rxkad_level1_hdr);
break;
@@ -163,17 +164,16 @@ static int rxkad_how_much_data(struct rxrpc_call *call, size_t remain,
break;
}
- buf_size = round_down(RXRPC_JUMBO_DATALEN, RXKAD_ALIGN);
+ space = min_t(size_t, round_down(RXRPC_JUMBO_DATALEN, RXKAD_ALIGN), remain + shdr);
+ space = round_up(space, RXKAD_ALIGN);
- chunk = buf_size - shdr;
- if (remain < chunk)
- buf_size = round_up(shdr + remain, RXKAD_ALIGN);
+ txb = rxrpc_alloc_data_txbuf(call, space, RXKAD_ALIGN, gfp);
+ if (!txb)
+ return NULL;
-out:
- *_buf_size = buf_size;
- *_data_size = chunk;
- *_offset = shdr;
- return 0;
+ txb->offset += shdr;
+ txb->space -= shdr;
+ return txb;
}
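The sizing arithmetic moves into the allocator: clamp to the aligned jumbo limit, round up to the cipher alignment, then advance offset/space past the security header. A standalone sketch of that math; the 4-byte level-1 header size is taken from rxkad_level1_hdr's data_size word, the rest is illustrative:

    #include <stddef.h>
    #include <stdio.h>

    #define JUMBO_DATALEN 1412
    #define RXKAD_ALIGN   8

    static size_t rnd_down(size_t x) { return x & ~(size_t)(RXKAD_ALIGN - 1); }
    static size_t rnd_up(size_t x)   { return (x + RXKAD_ALIGN - 1) & ~(size_t)(RXKAD_ALIGN - 1); }

    int main(void)
    {
        size_t remain = 100;              /* data left to send */
        size_t shdr = 4;                  /* level-1 header: data_size word */
        size_t space = rnd_down(JUMBO_DATALEN);

        if (space > remain + shdr)
            space = remain + shdr;        /* shrink for a short tail */
        space = rnd_up(space);            /* keep cipher alignment */
        printf("alloc %zu, data at offset %zu, room %zu\n",
               space, shdr, space - shdr);
        return 0;
    }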
/*
@@ -251,7 +251,8 @@ static int rxkad_secure_packet_auth(const struct rxrpc_call *call,
struct rxrpc_txbuf *txb,
struct skcipher_request *req)
{
- struct rxkad_level1_hdr *hdr = (void *)txb->data;
+ struct rxrpc_wire_header *whdr = txb->kvec[0].iov_base;
+ struct rxkad_level1_hdr *hdr = (void *)(whdr + 1);
struct rxrpc_crypt iv;
struct scatterlist sg;
size_t pad;
@@ -259,7 +260,7 @@ static int rxkad_secure_packet_auth(const struct rxrpc_call *call,
_enter("");
- check = txb->seq ^ ntohl(txb->wire.callNumber);
+ check = txb->seq ^ call->call_id;
hdr->data_size = htonl((u32)check << 16 | txb->len);
txb->len += sizeof(struct rxkad_level1_hdr);
@@ -267,14 +268,14 @@ static int rxkad_secure_packet_auth(const struct rxrpc_call *call,
pad = RXKAD_ALIGN - pad;
pad &= RXKAD_ALIGN - 1;
if (pad) {
- memset(txb->data + txb->offset, 0, pad);
+ memset(txb->kvec[0].iov_base + txb->offset, 0, pad);
txb->len += pad;
}
/* start the encryption afresh */
memset(&iv, 0, sizeof(iv));
- sg_init_one(&sg, txb->data, 8);
+ sg_init_one(&sg, hdr, 8);
skcipher_request_set_sync_tfm(req, call->conn->rxkad.cipher);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, &sg, &sg, 8, iv.x);
@@ -293,7 +294,8 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call,
struct skcipher_request *req)
{
const struct rxrpc_key_token *token;
- struct rxkad_level2_hdr *rxkhdr = (void *)txb->data;
+ struct rxrpc_wire_header *whdr = txb->kvec[0].iov_base;
+ struct rxkad_level2_hdr *rxkhdr = (void *)(whdr + 1);
struct rxrpc_crypt iv;
struct scatterlist sg;
size_t pad;
@@ -302,7 +304,7 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call,
_enter("");
- check = txb->seq ^ ntohl(txb->wire.callNumber);
+ check = txb->seq ^ call->call_id;
rxkhdr->data_size = htonl(txb->len | (u32)check << 16);
rxkhdr->checksum = 0;
@@ -312,7 +314,7 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call,
pad = RXKAD_ALIGN - pad;
pad &= RXKAD_ALIGN - 1;
if (pad) {
- memset(txb->data + txb->offset, 0, pad);
+ memset(txb->kvec[0].iov_base + txb->offset, 0, pad);
txb->len += pad;
}
@@ -320,7 +322,7 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call,
token = call->conn->key->payload.data[0];
memcpy(&iv, token->kad->session_key, sizeof(iv));
- sg_init_one(&sg, txb->data, txb->len);
+ sg_init_one(&sg, rxkhdr, txb->len);
skcipher_request_set_sync_tfm(req, call->conn->rxkad.cipher);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, &sg, &sg, txb->len, iv.x);
@@ -362,9 +364,9 @@ static int rxkad_secure_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
memcpy(&iv, call->conn->rxkad.csum_iv.x, sizeof(iv));
/* calculate the security checksum */
- x = (ntohl(txb->wire.cid) & RXRPC_CHANNELMASK) << (32 - RXRPC_CIDSHIFT);
+ x = (call->cid & RXRPC_CHANNELMASK) << (32 - RXRPC_CIDSHIFT);
x |= txb->seq & 0x3fffffff;
- crypto.buf[0] = txb->wire.callNumber;
+ crypto.buf[0] = htonl(call->call_id);
crypto.buf[1] = htonl(x);
sg_init_one(&sg, crypto.buf, 8);
@@ -378,7 +380,7 @@ static int rxkad_secure_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
y = (y >> 16) & 0xffff;
if (y == 0)
y = 1; /* zero checksums are not permitted */
- txb->wire.cksum = htons(y);
+ txb->cksum = htons(y);
switch (call->conn->security_level) {
case RXRPC_SECURITY_PLAIN:
@@ -726,7 +728,6 @@ static int rxkad_send_response(struct rxrpc_connection *conn,
rxrpc_local_dont_fragment(conn->local, false);
ret = kernel_sendmsg(conn->local->socket, &msg, iov, 3, len);
- rxrpc_local_dont_fragment(conn->local, true);
if (ret < 0) {
trace_rxrpc_tx_fail(conn->debug_id, serial, ret,
rxrpc_tx_point_rxkad_response);
@@ -1256,7 +1257,7 @@ const struct rxrpc_security rxkad = {
.free_preparse_server_key = rxkad_free_preparse_server_key,
.destroy_server_key = rxkad_destroy_server_key,
.init_connection_security = rxkad_init_connection_security,
- .how_much_data = rxkad_how_much_data,
+ .alloc_txbuf = rxkad_alloc_txbuf,
.secure_packet = rxkad_secure_packet,
.verify_packet = rxkad_verify_packet,
.free_call_crypto = rxkad_free_call_crypto,
diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
index 5677d5690a02..6f765768c49c 100644
--- a/net/rxrpc/sendmsg.c
+++ b/net/rxrpc/sendmsg.c
@@ -240,7 +240,7 @@ static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
rxrpc_notify_end_tx_t notify_end_tx)
{
rxrpc_seq_t seq = txb->seq;
- bool last = test_bit(RXRPC_TXBUF_LAST, &txb->flags), poke;
+ bool poke, last = txb->flags & RXRPC_LAST_PACKET;
rxrpc_inc_stat(call->rxnet, stat_tx_data);
@@ -336,7 +336,7 @@ reload:
do {
if (!txb) {
- size_t remain, bufsize, chunk, offset;
+ size_t remain;
_debug("alloc");
@@ -348,23 +348,11 @@ reload:
* region (enc blocksize), but the trailer is not.
*/
remain = more ? INT_MAX : msg_data_left(msg);
- ret = call->conn->security->how_much_data(call, remain,
- &bufsize, &chunk, &offset);
- if (ret < 0)
+ txb = call->conn->security->alloc_txbuf(call, remain, sk->sk_allocation);
+ if (IS_ERR(txb)) {
+ ret = PTR_ERR(txb);
goto maybe_error;
-
- _debug("SIZE: %zu/%zu @%zu", chunk, bufsize, offset);
-
- /* create a buffer that we can retain until it's ACK'd */
- ret = -ENOMEM;
- txb = rxrpc_alloc_txbuf(call, RXRPC_PACKET_TYPE_DATA,
- GFP_KERNEL);
- if (!txb)
- goto maybe_error;
-
- txb->offset = offset;
- txb->space -= offset;
- txb->space = min_t(size_t, chunk, txb->space);
+ }
}
_debug("append");
@@ -374,8 +362,8 @@ reload:
size_t copy = min_t(size_t, txb->space, msg_data_left(msg));
_debug("add %zu", copy);
- if (!copy_from_iter_full(txb->data + txb->offset, copy,
- &msg->msg_iter))
+ if (!copy_from_iter_full(txb->kvec[0].iov_base + txb->offset,
+ copy, &msg->msg_iter))
goto efault;
_debug("added");
txb->space -= copy;
@@ -394,18 +382,18 @@ reload:
/* add the packet to the send queue if it's now full */
if (!txb->space ||
(msg_data_left(msg) == 0 && !more)) {
- if (msg_data_left(msg) == 0 && !more) {
- txb->wire.flags |= RXRPC_LAST_PACKET;
- __set_bit(RXRPC_TXBUF_LAST, &txb->flags);
- }
+ if (msg_data_left(msg) == 0 && !more)
+ txb->flags |= RXRPC_LAST_PACKET;
else if (call->tx_top - call->acks_hard_ack <
call->tx_winsize)
- txb->wire.flags |= RXRPC_MORE_PACKETS;
+ txb->flags |= RXRPC_MORE_PACKETS;
ret = call->security->secure_packet(call, txb);
if (ret < 0)
goto out;
+ txb->kvec[0].iov_len += txb->len;
+ txb->len = txb->kvec[0].iov_len;
rxrpc_queue_packet(rx, call, txb, notify_end_tx);
txb = NULL;
}
@@ -621,7 +609,6 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
__releases(&rx->sk.sk_lock.slock)
{
struct rxrpc_call *call;
- unsigned long now, j;
bool dropped_lock = false;
int ret;
@@ -699,25 +686,21 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
switch (p.call.nr_timeouts) {
case 3:
- j = msecs_to_jiffies(p.call.timeouts.normal);
- if (p.call.timeouts.normal > 0 && j == 0)
- j = 1;
- WRITE_ONCE(call->next_rx_timo, j);
+ WRITE_ONCE(call->next_rx_timo, p.call.timeouts.normal);
fallthrough;
case 2:
- j = msecs_to_jiffies(p.call.timeouts.idle);
- if (p.call.timeouts.idle > 0 && j == 0)
- j = 1;
- WRITE_ONCE(call->next_req_timo, j);
+ WRITE_ONCE(call->next_req_timo, p.call.timeouts.idle);
fallthrough;
case 1:
if (p.call.timeouts.hard > 0) {
- j = p.call.timeouts.hard * HZ;
- now = jiffies;
- j += now;
- WRITE_ONCE(call->expect_term_by, j);
- rxrpc_reduce_call_timer(call, j, now,
- rxrpc_timer_set_for_hard);
+ ktime_t delay = ms_to_ktime(p.call.timeouts.hard * MSEC_PER_SEC);
+
+ WRITE_ONCE(call->expect_term_by,
+ ktime_add(ktime_get_real(), delay));
+ trace_rxrpc_timer_set(call, delay, rxrpc_timer_trace_hard);
+ rxrpc_poke_call(call, rxrpc_call_poke_set_timeout);
}
break;
}
diff --git a/net/rxrpc/sysctl.c b/net/rxrpc/sysctl.c
index ecaeb4ecfb58..c9bedd0e2d86 100644
--- a/net/rxrpc/sysctl.c
+++ b/net/rxrpc/sysctl.c
@@ -15,6 +15,8 @@ static const unsigned int four = 4;
static const unsigned int max_backlog = RXRPC_BACKLOG_MAX - 1;
static const unsigned int n_65535 = 65535;
static const unsigned int n_max_acks = 255;
+static const unsigned long one_ms = 1;
+static const unsigned long max_ms = 1000;
static const unsigned long one_jiffy = 1;
static const unsigned long max_jiffies = MAX_JIFFY_OFFSET;
#ifdef CONFIG_AF_RXRPC_INJECT_RX_DELAY
@@ -28,24 +30,24 @@ static const unsigned long max_500 = 500;
* information on the individual parameters.
*/
static struct ctl_table rxrpc_sysctl_table[] = {
- /* Values measured in milliseconds but used in jiffies */
+ /* Values measured in milliseconds */
{
.procname = "soft_ack_delay",
.data = &rxrpc_soft_ack_delay,
.maxlen = sizeof(unsigned long),
.mode = 0644,
- .proc_handler = proc_doulongvec_ms_jiffies_minmax,
- .extra1 = (void *)&one_jiffy,
- .extra2 = (void *)&max_jiffies,
+ .proc_handler = proc_doulongvec_minmax,
+ .extra1 = (void *)&one_ms,
+ .extra2 = (void *)&max_ms,
},
{
.procname = "idle_ack_delay",
.data = &rxrpc_idle_ack_delay,
.maxlen = sizeof(unsigned long),
.mode = 0644,
- .proc_handler = proc_doulongvec_ms_jiffies_minmax,
- .extra1 = (void *)&one_jiffy,
- .extra2 = (void *)&max_jiffies,
+ .proc_handler = proc_doulongvec_minmax,
+ .extra1 = (void *)&one_ms,
+ .extra2 = (void *)&max_ms,
},
{
.procname = "idle_conn_expiry",
diff --git a/net/rxrpc/txbuf.c b/net/rxrpc/txbuf.c
index d43be8512386..b2a82ab756c2 100644
--- a/net/rxrpc/txbuf.c
+++ b/net/rxrpc/txbuf.c
@@ -14,45 +14,146 @@ static atomic_t rxrpc_txbuf_debug_ids;
atomic_t rxrpc_nr_txbuf;
/*
- * Allocate and partially initialise an I/O request structure.
+ * Allocate and partially initialise a data transmission buffer.
*/
-struct rxrpc_txbuf *rxrpc_alloc_txbuf(struct rxrpc_call *call, u8 packet_type,
- gfp_t gfp)
+struct rxrpc_txbuf *rxrpc_alloc_data_txbuf(struct rxrpc_call *call, size_t data_size,
+ size_t data_align, gfp_t gfp)
{
+ struct rxrpc_wire_header *whdr;
struct rxrpc_txbuf *txb;
+ size_t total, hoff = 0;
+ void *buf;
txb = kmalloc(sizeof(*txb), gfp);
- if (txb) {
- INIT_LIST_HEAD(&txb->call_link);
- INIT_LIST_HEAD(&txb->tx_link);
- refcount_set(&txb->ref, 1);
- txb->call_debug_id = call->debug_id;
- txb->debug_id = atomic_inc_return(&rxrpc_txbuf_debug_ids);
- txb->space = sizeof(txb->data);
- txb->len = 0;
- txb->offset = 0;
- txb->flags = 0;
- txb->ack_why = 0;
- txb->seq = call->tx_prepared + 1;
- txb->wire.epoch = htonl(call->conn->proto.epoch);
- txb->wire.cid = htonl(call->cid);
- txb->wire.callNumber = htonl(call->call_id);
- txb->wire.seq = htonl(txb->seq);
- txb->wire.type = packet_type;
- txb->wire.flags = call->conn->out_clientflag;
- txb->wire.userStatus = 0;
- txb->wire.securityIndex = call->security_ix;
- txb->wire._rsvd = 0;
- txb->wire.serviceId = htons(call->dest_srx.srx_service);
-
- trace_rxrpc_txbuf(txb->debug_id,
- txb->call_debug_id, txb->seq, 1,
- packet_type == RXRPC_PACKET_TYPE_DATA ?
- rxrpc_txbuf_alloc_data :
- rxrpc_txbuf_alloc_ack);
- atomic_inc(&rxrpc_nr_txbuf);
+ if (!txb)
+ return NULL;
+
+ if (data_align)
+ hoff = round_up(sizeof(*whdr), data_align) - sizeof(*whdr);
+ total = hoff + sizeof(*whdr) + data_size;
+
+ mutex_lock(&call->conn->tx_data_alloc_lock);
+ buf = page_frag_alloc_align(&call->conn->tx_data_alloc, total, gfp,
+ ~(data_align - 1) & ~(L1_CACHE_BYTES - 1));
+ mutex_unlock(&call->conn->tx_data_alloc_lock);
+ if (!buf) {
+ kfree(txb);
+ return NULL;
+ }
+
+ whdr = buf + hoff;
+
+ INIT_LIST_HEAD(&txb->call_link);
+ INIT_LIST_HEAD(&txb->tx_link);
+ refcount_set(&txb->ref, 1);
+ txb->last_sent = KTIME_MIN;
+ txb->call_debug_id = call->debug_id;
+ txb->debug_id = atomic_inc_return(&rxrpc_txbuf_debug_ids);
+ txb->space = data_size;
+ txb->len = 0;
+ txb->offset = sizeof(*whdr);
+ txb->flags = call->conn->out_clientflag;
+ txb->ack_why = 0;
+ txb->seq = call->tx_prepared + 1;
+ txb->serial = 0;
+ txb->cksum = 0;
+ txb->nr_kvec = 1;
+ txb->kvec[0].iov_base = whdr;
+ txb->kvec[0].iov_len = sizeof(*whdr);
+
+ whdr->epoch = htonl(call->conn->proto.epoch);
+ whdr->cid = htonl(call->cid);
+ whdr->callNumber = htonl(call->call_id);
+ whdr->seq = htonl(txb->seq);
+ whdr->type = RXRPC_PACKET_TYPE_DATA;
+ whdr->flags = 0;
+ whdr->userStatus = 0;
+ whdr->securityIndex = call->security_ix;
+ whdr->_rsvd = 0;
+ whdr->serviceId = htons(call->dest_srx.srx_service);
+
+ trace_rxrpc_txbuf(txb->debug_id, txb->call_debug_id, txb->seq, 1,
+ rxrpc_txbuf_alloc_data);
+
+ atomic_inc(&rxrpc_nr_txbuf);
+ return txb;
+}
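The head offset is chosen so that the payload following the fixed-size wire header lands on the alignment the security class asked for. The math in isolation, assuming the 28-byte wire header:

    #include <stddef.h>
    #include <stdio.h>

    static size_t round_up_to(size_t x, size_t a)
    {
        return (x + a - 1) & ~(a - 1);
    }

    int main(void)
    {
        size_t whdr  = 28;                /* sizeof(struct rxrpc_wire_header) */
        size_t align = 8;                 /* data_align from the security class */
        size_t hoff  = round_up_to(whdr, align) - whdr;
        size_t data  = hoff + whdr;       /* payload offset within the frag */

        printf("hoff=%zu data_off=%zu aligned=%d\n",
               hoff, data, data % align == 0);
        return 0;
    }

Given a frag base that page_frag_alloc_align() already aligned, placing the header at base + hoff puts the payload exactly on the requested boundary.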
+
+/*
+ * Allocate and partially initialise an ACK packet.
+ */
+struct rxrpc_txbuf *rxrpc_alloc_ack_txbuf(struct rxrpc_call *call, size_t sack_size)
+{
+ struct rxrpc_wire_header *whdr;
+ struct rxrpc_acktrailer *trailer;
+ struct rxrpc_ackpacket *ack;
+ struct rxrpc_txbuf *txb;
+ gfp_t gfp = rcu_read_lock_held() ? GFP_ATOMIC | __GFP_NOWARN : GFP_NOFS;
+ void *buf, *buf2 = NULL;
+ u8 *filler;
+
+ txb = kmalloc(sizeof(*txb), gfp);
+ if (!txb)
+ return NULL;
+
+ buf = page_frag_alloc(&call->local->tx_alloc,
+ sizeof(*whdr) + sizeof(*ack) + 1 + 3 + sizeof(*trailer), gfp);
+ if (!buf) {
+ kfree(txb);
+ return NULL;
+ }
+
+ if (sack_size) {
+ buf2 = page_frag_alloc(&call->local->tx_alloc, sack_size, gfp);
+ if (!buf2) {
+ page_frag_free(buf);
+ kfree(txb);
+ return NULL;
+ }
}
+ whdr = buf;
+ ack = buf + sizeof(*whdr);
+ filler = buf + sizeof(*whdr) + sizeof(*ack) + 1;
+ trailer = buf + sizeof(*whdr) + sizeof(*ack) + 1 + 3;
+
+ INIT_LIST_HEAD(&txb->call_link);
+ INIT_LIST_HEAD(&txb->tx_link);
+ refcount_set(&txb->ref, 1);
+ txb->call_debug_id = call->debug_id;
+ txb->debug_id = atomic_inc_return(&rxrpc_txbuf_debug_ids);
+ txb->space = 0;
+ txb->len = sizeof(*whdr) + sizeof(*ack) + 3 + sizeof(*trailer);
+ txb->offset = 0;
+ txb->flags = call->conn->out_clientflag;
+ txb->ack_rwind = 0;
+ txb->seq = 0;
+ txb->serial = 0;
+ txb->cksum = 0;
+ txb->nr_kvec = 3;
+ txb->kvec[0].iov_base = whdr;
+ txb->kvec[0].iov_len = sizeof(*whdr) + sizeof(*ack);
+ txb->kvec[1].iov_base = buf2;
+ txb->kvec[1].iov_len = sack_size;
+ txb->kvec[2].iov_base = filler;
+ txb->kvec[2].iov_len = 3 + sizeof(*trailer);
+
+ whdr->epoch = htonl(call->conn->proto.epoch);
+ whdr->cid = htonl(call->cid);
+ whdr->callNumber = htonl(call->call_id);
+ whdr->seq = 0;
+ whdr->type = RXRPC_PACKET_TYPE_ACK;
+ whdr->flags = 0;
+ whdr->userStatus = 0;
+ whdr->securityIndex = call->security_ix;
+ whdr->_rsvd = 0;
+ whdr->serviceId = htons(call->dest_srx.srx_service);
+
+ get_page(virt_to_head_page(trailer));
+
+ trace_rxrpc_txbuf(txb->debug_id, txb->call_debug_id, txb->seq, 1,
+ rxrpc_txbuf_alloc_ack);
+ atomic_inc(&rxrpc_nr_txbuf);
return txb;
}
@@ -71,12 +172,15 @@ void rxrpc_see_txbuf(struct rxrpc_txbuf *txb, enum rxrpc_txbuf_trace what)
trace_rxrpc_txbuf(txb->debug_id, txb->call_debug_id, txb->seq, r, what);
}
-static void rxrpc_free_txbuf(struct rcu_head *rcu)
+static void rxrpc_free_txbuf(struct rxrpc_txbuf *txb)
{
- struct rxrpc_txbuf *txb = container_of(rcu, struct rxrpc_txbuf, rcu);
+ int i;
trace_rxrpc_txbuf(txb->debug_id, txb->call_debug_id, txb->seq, 0,
rxrpc_txbuf_free);
+ for (i = 0; i < txb->nr_kvec; i++)
+ if (txb->kvec[i].iov_base)
+ page_frag_free(txb->kvec[i].iov_base);
kfree(txb);
atomic_dec(&rxrpc_nr_txbuf);
}
@@ -95,7 +199,7 @@ void rxrpc_put_txbuf(struct rxrpc_txbuf *txb, enum rxrpc_txbuf_trace what)
dead = __refcount_dec_and_test(&txb->ref, &r);
trace_rxrpc_txbuf(debug_id, call_debug_id, seq, r - 1, what);
if (dead)
- call_rcu(&txb->rcu, rxrpc_free_txbuf);
+ rxrpc_free_txbuf(txb);
}
}
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index 470c70deffe2..8180d0c12fce 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -737,16 +737,6 @@ config NET_ACT_SAMPLE
To compile this code as a module, choose M here: the
module will be called act_sample.
-config NET_ACT_IPT
- tristate "IPtables targets"
- depends on NET_CLS_ACT && NETFILTER && NETFILTER_XTABLES
- help
- Say Y here to be able to invoke iptables targets after successful
- classification.
-
- To compile this code as a module, choose M here: the
- module will be called act_ipt.
-
config NET_ACT_NAT
tristate "Stateless NAT"
depends on NET_CLS_ACT
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 3e30d7260493..9ee622fb1160 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -1363,7 +1363,7 @@ struct tc_action_ops *tc_action_load_ops(struct nlattr *nla, u32 flags,
if (rtnl_held)
rtnl_unlock();
- request_module("act_%s", act_name);
+ request_module(NET_ACT_ALIAS_PREFIX "%s", act_name);
if (rtnl_held)
rtnl_lock();
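Loading by alias prefix closes the hole where a crafted action name could modprobe an arbitrary act_* module: only modules that declare the matching alias can be pulled in. A sketch of how the pieces presumably line up; the macro bodies are inferred from the prefix usage in this patch, not quoted verbatim:

    /* Assumed expansions: each action/classifier module publishes a
     * namespaced alias instead of relying on its file name.
     */
    #define NET_ACT_ALIAS_PREFIX "net-act-"
    #define NET_CLS_ALIAS_PREFIX "net-cls-"
    #define MODULE_ALIAS_NET_ACT(kind) MODULE_ALIAS(NET_ACT_ALIAS_PREFIX kind)
    #define MODULE_ALIAS_NET_CLS(kind) MODULE_ALIAS(NET_CLS_ALIAS_PREFIX kind)

    /* So request_module(NET_ACT_ALIAS_PREFIX "%s", "bpf") resolves the
     * "net-act-bpf" alias declared by act_bpf.c, rather than letting
     * the name pick any "act_<anything>" module on disk.
     */

The same pattern covers the classifier lookup further down, where cls_api.c switches to NET_CLS_ALIAS_PREFIX.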
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
index 6cfee6658103..0e3cf11ae5fc 100644
--- a/net/sched/act_bpf.c
+++ b/net/sched/act_bpf.c
@@ -401,6 +401,7 @@ static struct tc_action_ops act_bpf_ops __read_mostly = {
.init = tcf_bpf_init,
.size = sizeof(struct tcf_bpf),
};
+MODULE_ALIAS_NET_ACT("bpf");
static __net_init int bpf_init_net(struct net *net)
{
diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c
index f8762756657d..0fce631e7c91 100644
--- a/net/sched/act_connmark.c
+++ b/net/sched/act_connmark.c
@@ -242,6 +242,7 @@ static struct tc_action_ops act_connmark_ops = {
.cleanup = tcf_connmark_cleanup,
.size = sizeof(struct tcf_connmark_info),
};
+MODULE_ALIAS_NET_ACT("connmark");
static __net_init int connmark_init_net(struct net *net)
{
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index 7f8b1f2f2ed9..5cc8e407e791 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -709,6 +709,7 @@ static struct tc_action_ops act_csum_ops = {
.offload_act_setup = tcf_csum_offload_act_setup,
.size = sizeof(struct tcf_csum),
};
+MODULE_ALIAS_NET_ACT("csum");
static __net_init int csum_init_net(struct net *net)
{
diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
index 6124d8b128d1..baac083fd8f1 100644
--- a/net/sched/act_ct.c
+++ b/net/sched/act_ct.c
@@ -1600,6 +1600,7 @@ static struct tc_action_ops act_ct_ops = {
.offload_act_setup = tcf_ct_offload_act_setup,
.size = sizeof(struct tcf_ct),
};
+MODULE_ALIAS_NET_ACT("ct");
static __net_init int ct_init_net(struct net *net)
{
diff --git a/net/sched/act_ctinfo.c b/net/sched/act_ctinfo.c
index e620f9a84afe..5dd41a012110 100644
--- a/net/sched/act_ctinfo.c
+++ b/net/sched/act_ctinfo.c
@@ -363,6 +363,7 @@ static struct tc_action_ops act_ctinfo_ops = {
.cleanup= tcf_ctinfo_cleanup,
.size = sizeof(struct tcf_ctinfo),
};
+MODULE_ALIAS_NET_ACT("ctinfo");
static __net_init int ctinfo_init_net(struct net *net)
{
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index 4af3b7ec249f..e949280eb800 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -296,6 +296,7 @@ static struct tc_action_ops act_gact_ops = {
.offload_act_setup = tcf_gact_offload_act_setup,
.size = sizeof(struct tcf_gact),
};
+MODULE_ALIAS_NET_ACT("gact");
static __net_init int gact_init_net(struct net *net)
{
diff --git a/net/sched/act_gate.c b/net/sched/act_gate.c
index c681cd011afd..1dd74125398a 100644
--- a/net/sched/act_gate.c
+++ b/net/sched/act_gate.c
@@ -645,6 +645,7 @@ static struct tc_action_ops act_gate_ops = {
.offload_act_setup = tcf_gate_offload_act_setup,
.size = sizeof(struct tcf_gate),
};
+MODULE_ALIAS_NET_ACT("gate");
static __net_init int gate_init_net(struct net *net)
{
diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
index 0e867d13beb5..107c6d83dc5c 100644
--- a/net/sched/act_ife.c
+++ b/net/sched/act_ife.c
@@ -889,6 +889,7 @@ static struct tc_action_ops act_ife_ops = {
.init = tcf_ife_init,
.size = sizeof(struct tcf_ife_info),
};
+MODULE_ALIAS_NET_ACT("ife");
static __net_init int ife_init_net(struct net *net)
{
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 6faa7d00da09..5b3814365924 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -635,6 +635,7 @@ static struct tc_action_ops act_mirred_ops = {
.size = sizeof(struct tcf_mirred),
.get_dev = tcf_mirred_get_dev,
};
+MODULE_ALIAS_NET_ACT("mirred");
static __net_init int mirred_init_net(struct net *net)
{
diff --git a/net/sched/act_mpls.c b/net/sched/act_mpls.c
index 34b8edb6cc77..44a37a71ae92 100644
--- a/net/sched/act_mpls.c
+++ b/net/sched/act_mpls.c
@@ -452,6 +452,7 @@ static struct tc_action_ops act_mpls_ops = {
.offload_act_setup = tcf_mpls_offload_act_setup,
.size = sizeof(struct tcf_mpls),
};
+MODULE_ALIAS_NET_ACT("mpls");
static __net_init int mpls_init_net(struct net *net)
{
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index a180e724634e..d541f553805f 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -324,6 +324,7 @@ static struct tc_action_ops act_nat_ops = {
.cleanup = tcf_nat_cleanup,
.size = sizeof(struct tcf_nat),
};
+MODULE_ALIAS_NET_ACT("nat");
static __net_init int nat_init_net(struct net *net)
{
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 2ef22969f274..fc0a35a7b62a 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -515,11 +515,11 @@ static int tcf_pedit_dump(struct sk_buff *skb, struct tc_action *a,
spin_unlock_bh(&p->tcf_lock);
return -ENOBUFS;
}
+ opt->nkeys = parms->tcfp_nkeys;
memcpy(opt->keys, parms->tcfp_keys,
flex_array_size(opt, keys, parms->tcfp_nkeys));
opt->index = p->tcf_index;
- opt->nkeys = parms->tcfp_nkeys;
opt->flags = parms->tcfp_flags;
opt->action = p->tcf_action;
opt->refcnt = refcount_read(&p->tcf_refcnt) - ref;
@@ -620,6 +620,7 @@ static struct tc_action_ops act_pedit_ops = {
.offload_act_setup = tcf_pedit_offload_act_setup,
.size = sizeof(struct tcf_pedit),
};
+MODULE_ALIAS_NET_ACT("pedit");
static __net_init int pedit_init_net(struct net *net)
{
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index e119b4a3db9f..8555125ed34d 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -502,6 +502,7 @@ static struct tc_action_ops act_police_ops = {
.offload_act_setup = tcf_police_offload_act_setup,
.size = sizeof(struct tcf_police),
};
+MODULE_ALIAS_NET_ACT("police");
static __net_init int police_init_net(struct net *net)
{
diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c
index c5c61efe6db4..a69b53d54039 100644
--- a/net/sched/act_sample.c
+++ b/net/sched/act_sample.c
@@ -316,6 +316,7 @@ static struct tc_action_ops act_sample_ops = {
.offload_act_setup = tcf_sample_offload_act_setup,
.size = sizeof(struct tcf_sample),
};
+MODULE_ALIAS_NET_ACT("sample");
static __net_init int sample_init_net(struct net *net)
{
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index 0a3e92888295..f3abe0545989 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -209,6 +209,7 @@ static struct tc_action_ops act_simp_ops = {
.init = tcf_simp_init,
.size = sizeof(struct tcf_defact),
};
+MODULE_ALIAS_NET_ACT("simple");
static __net_init int simp_init_net(struct net *net)
{
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index 754f78b35bb8..1f1d9ce3e968 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -426,6 +426,7 @@ static struct tc_action_ops act_skbedit_ops = {
.offload_act_setup = tcf_skbedit_offload_act_setup,
.size = sizeof(struct tcf_skbedit),
};
+MODULE_ALIAS_NET_ACT("skbedit");
static __net_init int skbedit_init_net(struct net *net)
{
diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c
index bcb673ab0008..39945b139c48 100644
--- a/net/sched/act_skbmod.c
+++ b/net/sched/act_skbmod.c
@@ -287,6 +287,7 @@ static struct tc_action_ops act_skbmod_ops = {
.cleanup = tcf_skbmod_cleanup,
.size = sizeof(struct tcf_skbmod),
};
+MODULE_ALIAS_NET_ACT("skbmod");
static __net_init int skbmod_init_net(struct net *net)
{
diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
index 300b08aa8283..1536f8b16f1b 100644
--- a/net/sched/act_tunnel_key.c
+++ b/net/sched/act_tunnel_key.c
@@ -842,6 +842,7 @@ static struct tc_action_ops act_tunnel_key_ops = {
.offload_act_setup = tcf_tunnel_key_offload_act_setup,
.size = sizeof(struct tcf_tunnel_key),
};
+MODULE_ALIAS_NET_ACT("tunnel_key");
static __net_init int tunnel_key_init_net(struct net *net)
{
diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c
index 836183011a7c..22f4b1e8ade9 100644
--- a/net/sched/act_vlan.c
+++ b/net/sched/act_vlan.c
@@ -427,6 +427,7 @@ static struct tc_action_ops act_vlan_ops = {
.offload_act_setup = tcf_vlan_offload_act_setup,
.size = sizeof(struct tcf_vlan),
};
+MODULE_ALIAS_NET_ACT("vlan");
static __net_init int vlan_init_net(struct net *net)
{
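
The MODULE_ALIAS_NET_ACT() lines added to every action module above presumably pair with a matching request_module(NET_ACT_ALIAS_PREFIX "%s", kind) in the action core, mirroring the cls_api.c and sch_api.c hunks below. The macro itself comes from a header hunk not shown in this diff; its assumed shape:

/* Assumed definition, include/net/act_api.h (header not shown here): */
#define NET_ACT_ALIAS_PREFIX	"net-act-"
#define MODULE_ALIAS_NET_ACT(kind)	MODULE_ALIAS(NET_ACT_ALIAS_PREFIX kind)

MODULE_ALIAS_NET_ACT("vlan") thus records "net-act-vlan" in modules.alias, so only modules that opt in with the macro can be auto-loaded when the core asks for an action by kind.
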
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index ff3d396a65aa..ca5676b2668e 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -257,7 +257,7 @@ tcf_proto_lookup_ops(const char *kind, bool rtnl_held,
#ifdef CONFIG_MODULES
if (rtnl_held)
rtnl_unlock();
- request_module("cls_%s", kind);
+ request_module(NET_CLS_ALIAS_PREFIX "%s", kind);
if (rtnl_held)
rtnl_lock();
ops = __tcf_proto_lookup_ops(kind);
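
The classifier-side prefix works the same way; the assumed header counterpart:

/* Assumed definition, include/net/pkt_cls.h (header not shown here): */
#define NET_CLS_ALIAS_PREFIX	"net-cls-"
#define MODULE_ALIAS_NET_CLS(kind)	MODULE_ALIAS(NET_CLS_ALIAS_PREFIX kind)

A lookup for kind "flower" now requests "net-cls-flower", which modprobe resolves through modules.alias, rather than a module literally named cls_flower; a module that merely happens to match the old file-name pattern can no longer be pulled in this way.
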
diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c
index a1f56931330c..ecfaa4f9a04e 100644
--- a/net/sched/cls_basic.c
+++ b/net/sched/cls_basic.c
@@ -328,6 +328,7 @@ static struct tcf_proto_ops cls_basic_ops __read_mostly = {
.bind_class = basic_bind_class,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_CLS("basic");
static int __init init_basic(void)
{
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index 382c7a71f81f..5e83e890f6a4 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -693,6 +693,7 @@ static struct tcf_proto_ops cls_bpf_ops __read_mostly = {
.dump = cls_bpf_dump,
.bind_class = cls_bpf_bind_class,
};
+MODULE_ALIAS_NET_CLS("bpf");
static int __init cls_bpf_init_mod(void)
{
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c
index 7ee8dbf49ed0..424252982d6a 100644
--- a/net/sched/cls_cgroup.c
+++ b/net/sched/cls_cgroup.c
@@ -209,6 +209,7 @@ static struct tcf_proto_ops cls_cgroup_ops __read_mostly = {
.dump = cls_cgroup_dump,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_CLS("cgroup");
static int __init init_cgroup_cls(void)
{
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index 6ab317b48d6c..5502998aace7 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -702,6 +702,7 @@ static struct tcf_proto_ops cls_flow_ops __read_mostly = {
.walk = flow_walk,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_CLS("flow");
static int __init cls_flow_init(void)
{
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 6ee7064c82fc..e1314674b4a9 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -3659,6 +3659,7 @@ static struct tcf_proto_ops cls_fl_ops __read_mostly = {
.owner = THIS_MODULE,
.flags = TCF_PROTO_OPS_DOIT_UNLOCKED,
};
+MODULE_ALIAS_NET_CLS("flower");
static int __init cls_fl_init(void)
{
diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c
index afc534ee0a18..cdddc8695228 100644
--- a/net/sched/cls_fw.c
+++ b/net/sched/cls_fw.c
@@ -433,6 +433,7 @@ static struct tcf_proto_ops cls_fw_ops __read_mostly = {
.bind_class = fw_bind_class,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_CLS("fw");
static int __init init_fw(void)
{
diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c
index c4ed11df6254..9f1e62ca508d 100644
--- a/net/sched/cls_matchall.c
+++ b/net/sched/cls_matchall.c
@@ -398,6 +398,7 @@ static struct tcf_proto_ops cls_mall_ops __read_mostly = {
.bind_class = mall_bind_class,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_CLS("matchall");
static int __init cls_mall_init(void)
{
diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c
index 12a505db4183..b9c58c040c30 100644
--- a/net/sched/cls_route.c
+++ b/net/sched/cls_route.c
@@ -671,6 +671,7 @@ static struct tcf_proto_ops cls_route4_ops __read_mostly = {
.bind_class = route4_bind_class,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_CLS("route");
static int __init init_route4(void)
{
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 289e1755c26b..9412d88a99bc 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -1453,6 +1453,7 @@ static struct tcf_proto_ops cls_u32_ops __read_mostly = {
.bind_class = u32_bind_class,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_CLS("u32");
static int __init init_u32(void)
{
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 87f6e3c6daa8..65e05b0c98e4 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -228,7 +228,7 @@ int qdisc_set_default(const char *name)
if (!ops) {
/* Not found, drop lock and try to load module */
write_unlock(&qdisc_mod_lock);
- request_module("sch_%s", name);
+ request_module(NET_SCH_ALIAS_PREFIX "%s", name);
write_lock(&qdisc_mod_lock);
ops = qdisc_lookup_default(name);
@@ -1275,7 +1275,7 @@ static struct Qdisc *qdisc_create(struct net_device *dev,
* go away in the mean time.
*/
rtnl_unlock();
- request_module("sch_%s", name);
+ request_module(NET_SCH_ALIAS_PREFIX "%s", name);
rtnl_lock();
ops = qdisc_lookup_ops(kind);
if (ops != NULL) {
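
Same scheme for qdiscs: both request_module() call sites now compose the alias from a prefix assumed to be defined next to MODULE_ALIAS_NET_SCH() in include/net/pkt_sched.h:

/* Assumed definition (header not shown here): */
#define NET_SCH_ALIAS_PREFIX	"net-sch-"
#define MODULE_ALIAS_NET_SCH(id)	MODULE_ALIAS(NET_SCH_ALIAS_PREFIX id)

/* e.g. `tc qdisc add dev eth0 root htb` on a kernel without sch_htb
 * built in now triggers, roughly:
 */
request_module(NET_SCH_ALIAS_PREFIX "%s", "htb");	/* "net-sch-htb" */
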
diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index 9cff99558694..edee926ccde8 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -3103,6 +3103,7 @@ static struct Qdisc_ops cake_qdisc_ops __read_mostly = {
.dump_stats = cake_dump_stats,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("cake");
static int __init cake_module_init(void)
{
diff --git a/net/sched/sch_cbs.c b/net/sched/sch_cbs.c
index beece8e82c23..69001eff0315 100644
--- a/net/sched/sch_cbs.c
+++ b/net/sched/sch_cbs.c
@@ -546,6 +546,7 @@ static struct Qdisc_ops cbs_qdisc_ops __read_mostly = {
.dump = cbs_dump,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("cbs");
static struct notifier_block cbs_device_notifier = {
.notifier_call = cbs_dev_notifier,
diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
index ae1da08e268f..ea108030c6b4 100644
--- a/net/sched/sch_choke.c
+++ b/net/sched/sch_choke.c
@@ -498,6 +498,7 @@ static struct Qdisc_ops choke_qdisc_ops __read_mostly = {
.dump_stats = choke_dump_stats,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("choke");
static int __init choke_module_init(void)
{
diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c
index d7a4874543de..ecb3f164bb25 100644
--- a/net/sched/sch_codel.c
+++ b/net/sched/sch_codel.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Codel - The Controlled-Delay Active Queue Management algorithm
*
@@ -7,37 +8,6 @@
* Implemented on linux by :
* Copyright (C) 2012 Michael D. Taht <dave.taht@bufferbloat.net>
* Copyright (C) 2012,2015 Eric Dumazet <edumazet@google.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The names of the authors may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * Alternatively, provided that this notice is retained in full, this
- * software may be distributed under the terms of the GNU General
- * Public License ("GPL") version 2, in which case the provisions of the
- * GPL apply INSTEAD OF those given above.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
- * DAMAGE.
- *
*/
#include <linux/module.h>
@@ -287,6 +257,7 @@ static struct Qdisc_ops codel_qdisc_ops __read_mostly = {
.dump_stats = codel_dump_stats,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("codel");
static int __init codel_module_init(void)
{
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index 097740a9afea..c69b999fae17 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -481,6 +481,7 @@ static struct Qdisc_ops drr_qdisc_ops __read_mostly = {
.destroy = drr_destroy_qdisc,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("drr");
static int __init drr_init(void)
{
diff --git a/net/sched/sch_etf.c b/net/sched/sch_etf.c
index 4808159a5466..2e4bef713b6a 100644
--- a/net/sched/sch_etf.c
+++ b/net/sched/sch_etf.c
@@ -500,6 +500,7 @@ static struct Qdisc_ops etf_qdisc_ops __read_mostly = {
.dump = etf_dump,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("etf");
static int __init etf_module_init(void)
{
diff --git a/net/sched/sch_ets.c b/net/sched/sch_ets.c
index f7c88495946b..835b4460b448 100644
--- a/net/sched/sch_ets.c
+++ b/net/sched/sch_ets.c
@@ -812,6 +812,7 @@ static struct Qdisc_ops ets_qdisc_ops __read_mostly = {
.dump = ets_qdisc_dump,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("ets");
static int __init ets_init(void)
{
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index 3a31c47fea9b..cdf23ff16f40 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -1264,6 +1264,7 @@ static struct Qdisc_ops fq_qdisc_ops __read_mostly = {
.dump_stats = fq_dump_stats,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("fq");
static int __init fq_module_init(void)
{
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
index 8c4fee063436..79f9d6de6c85 100644
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -717,6 +717,7 @@ static struct Qdisc_ops fq_codel_qdisc_ops __read_mostly = {
.dump_stats = fq_codel_dump_stats,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("fq_codel");
static int __init fq_codel_module_init(void)
{
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 9b3e9262040b..ff5336493777 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -27,6 +27,7 @@
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/dst.h>
+#include <net/hotdata.h>
#include <trace/events/qdisc.h>
#include <trace/events/net.h>
#include <net/xfrm.h>
@@ -409,7 +410,7 @@ static inline bool qdisc_restart(struct Qdisc *q, int *packets)
void __qdisc_run(struct Qdisc *q)
{
- int quota = READ_ONCE(dev_tx_weight);
+ int quota = READ_ONCE(net_hotdata.dev_tx_weight);
int packets;
while (qdisc_restart(q, &packets)) {
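
The __qdisc_run() hunk is part of a wider move of hot global tunables into a single net_hotdata structure so that frequently-read fields share cachelines. Only the member used here is sketched; the real struct in include/net/hotdata.h is assumed to carry more fields:

/* Rough shape, assumption (other members elided): */
struct net_hotdata {
	/* ... other frequently-read fields ... */
	int	dev_tx_weight;	/* replaces the standalone dev_tx_weight */
};
extern struct net_hotdata net_hotdata;
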
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index 8c61eb3dc943..79ba9dc70254 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -930,6 +930,7 @@ static struct Qdisc_ops gred_qdisc_ops __read_mostly = {
.dump = gred_dump,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("gred");
static int __init gred_module_init(void)
{
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 16c45da4036a..4e626df742d7 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1679,6 +1679,7 @@ static struct Qdisc_ops hfsc_qdisc_ops __read_mostly = {
.priv_size = sizeof(struct hfsc_sched),
.owner = THIS_MODULE
};
+MODULE_ALIAS_NET_SCH("hfsc");
static int __init
hfsc_init(void)
diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c
index d26cd436cbe3..3f906df1435b 100644
--- a/net/sched/sch_hhf.c
+++ b/net/sched/sch_hhf.c
@@ -702,6 +702,7 @@ static struct Qdisc_ops hhf_qdisc_ops __read_mostly = {
.dump_stats = hhf_dump_stats,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("hhf");
static int __init hhf_module_init(void)
{
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 7349233eaa9b..93e6fb56f3b5 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -2166,6 +2166,7 @@ static struct Qdisc_ops htb_qdisc_ops __read_mostly = {
.dump = htb_dump,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("htb");
static int __init htb_module_init(void)
{
diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c
index 5fa9eaa79bfc..c2ef9dcf91d2 100644
--- a/net/sched/sch_ingress.c
+++ b/net/sched/sch_ingress.c
@@ -168,6 +168,7 @@ static struct Qdisc_ops ingress_qdisc_ops __read_mostly = {
.ingress_block_get = ingress_ingress_block_get,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("ingress");
struct clsact_sched_data {
struct tcf_block *ingress_block;
@@ -344,6 +345,7 @@ static struct Qdisc_ops clsact_qdisc_ops __read_mostly = {
.egress_block_get = clsact_egress_block_get,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("clsact");
static int __init ingress_module_init(void)
{
@@ -368,6 +370,5 @@ static void __exit ingress_module_exit(void)
module_init(ingress_module_init);
module_exit(ingress_module_exit);
-MODULE_ALIAS("sch_clsact");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Ingress and clsact based ingress and egress qdiscs");
diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
index 43e53ee00a56..225353fbb3f1 100644
--- a/net/sched/sch_mqprio.c
+++ b/net/sched/sch_mqprio.c
@@ -774,6 +774,7 @@ static struct Qdisc_ops mqprio_qdisc_ops __read_mostly = {
.dump = mqprio_dump,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("mqprio");
static int __init mqprio_module_init(void)
{
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
index d66d5f0ec080..79e93a19d5fa 100644
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -395,6 +395,7 @@ static struct Qdisc_ops multiq_qdisc_ops __read_mostly = {
.dump = multiq_dump,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("multiq");
static int __init multiq_module_init(void)
{
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index fa678eb88528..edc72962ae63 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -1293,6 +1293,7 @@ static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
.dump = netem_dump,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("netem");
static int __init netem_module_init(void)
diff --git a/net/sched/sch_pie.c b/net/sched/sch_pie.c
index 2da6250ec346..1764059b0635 100644
--- a/net/sched/sch_pie.c
+++ b/net/sched/sch_pie.c
@@ -556,6 +556,7 @@ static struct Qdisc_ops pie_qdisc_ops __read_mostly = {
.dump_stats = pie_dump_stats,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("pie");
static int __init pie_module_init(void)
{
diff --git a/net/sched/sch_plug.c b/net/sched/sch_plug.c
index 992f0c8d7988..cefb65201e17 100644
--- a/net/sched/sch_plug.c
+++ b/net/sched/sch_plug.c
@@ -213,6 +213,7 @@ static struct Qdisc_ops plug_qdisc_ops __read_mostly = {
.reset = qdisc_reset_queue,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("plug");
static int __init plug_module_init(void)
{
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index 8ecdd3ef6f8e..cc30f7a32f1a 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -418,6 +418,7 @@ static struct Qdisc_ops prio_qdisc_ops __read_mostly = {
.dump = prio_dump,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("prio");
static int __init prio_module_init(void)
{
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index 48a604c320c7..d584c0c25899 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -1521,6 +1521,7 @@ static struct Qdisc_ops qfq_qdisc_ops __read_mostly = {
.destroy = qfq_destroy_qdisc,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("qfq");
static int __init qfq_init(void)
{
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index 607b6c8b3a9b..b5f096588fae 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -548,6 +548,7 @@ static struct Qdisc_ops red_qdisc_ops __read_mostly = {
.dump_stats = red_dump_stats,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("red");
static int __init red_module_init(void)
{
diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
index 1871a1c0224d..b717e15a3a17 100644
--- a/net/sched/sch_sfb.c
+++ b/net/sched/sch_sfb.c
@@ -709,6 +709,7 @@ static struct Qdisc_ops sfb_qdisc_ops __read_mostly = {
.dump_stats = sfb_dump_stats,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("sfb");
static int __init sfb_module_init(void)
{
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index eb77558fa367..e66f4afb920d 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -925,6 +925,7 @@ static struct Qdisc_ops sfq_qdisc_ops __read_mostly = {
.dump = sfq_dump,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("sfq");
static int __init sfq_module_init(void)
{
diff --git a/net/sched/sch_skbprio.c b/net/sched/sch_skbprio.c
index 28beb11762d8..b4dd626c309c 100644
--- a/net/sched/sch_skbprio.c
+++ b/net/sched/sch_skbprio.c
@@ -292,6 +292,7 @@ static struct Qdisc_ops skbprio_qdisc_ops __read_mostly = {
.destroy = skbprio_destroy,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("skbprio");
static int __init skbprio_module_init(void)
{
diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
index 31a8252bd09c..c5de70efdc86 100644
--- a/net/sched/sch_taprio.c
+++ b/net/sched/sch_taprio.c
@@ -40,6 +40,8 @@ static struct static_key_false taprio_have_working_mqprio;
#define TXTIME_ASSIST_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST)
#define FULL_OFFLOAD_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD)
+#define TAPRIO_SUPPORTED_FLAGS \
+ (TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST | TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD)
#define TAPRIO_FLAGS_INVALID U32_MAX
struct sched_entry {
@@ -408,19 +410,6 @@ static bool is_valid_interval(struct sk_buff *skb, struct Qdisc *sch)
return entry;
}
-static bool taprio_flags_valid(u32 flags)
-{
- /* Make sure no other flag bits are set. */
- if (flags & ~(TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST |
- TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD))
- return false;
- /* txtime-assist and full offload are mutually exclusive */
- if ((flags & TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST) &&
- (flags & TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD))
- return false;
- return true;
-}
-
/* This returns the tstamp value set by TCP in terms of the set clock. */
static ktime_t get_tcp_tstamp(struct taprio_sched *q, struct sk_buff *skb)
{
@@ -1031,7 +1020,8 @@ static const struct nla_policy taprio_policy[TCA_TAPRIO_ATTR_MAX + 1] = {
[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME] =
NLA_POLICY_FULL_RANGE_SIGNED(NLA_S64, &taprio_cycle_time_range),
[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION] = { .type = NLA_S64 },
- [TCA_TAPRIO_ATTR_FLAGS] = { .type = NLA_U32 },
+ [TCA_TAPRIO_ATTR_FLAGS] =
+ NLA_POLICY_MASK(NLA_U32, TAPRIO_SUPPORTED_FLAGS),
[TCA_TAPRIO_ATTR_TXTIME_DELAY] = { .type = NLA_U32 },
[TCA_TAPRIO_ATTR_TC_ENTRY] = { .type = NLA_NESTED },
};
@@ -1815,33 +1805,6 @@ static int taprio_mqprio_cmp(const struct net_device *dev,
return 0;
}
-/* The semantics of the 'flags' argument in relation to 'change()'
- * requests, are interpreted following two rules (which are applied in
- * this order): (1) an omitted 'flags' argument is interpreted as
- * zero; (2) the 'flags' of a "running" taprio instance cannot be
- * changed.
- */
-static int taprio_new_flags(const struct nlattr *attr, u32 old,
- struct netlink_ext_ack *extack)
-{
- u32 new = 0;
-
- if (attr)
- new = nla_get_u32(attr);
-
- if (old != TAPRIO_FLAGS_INVALID && old != new) {
- NL_SET_ERR_MSG_MOD(extack, "Changing 'flags' of a running schedule is not supported");
- return -EOPNOTSUPP;
- }
-
- if (!taprio_flags_valid(new)) {
- NL_SET_ERR_MSG_MOD(extack, "Specified 'flags' are not valid");
- return -EINVAL;
- }
-
- return new;
-}
-
static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
struct netlink_ext_ack *extack)
{
@@ -1852,6 +1815,7 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
struct net_device *dev = qdisc_dev(sch);
struct tc_mqprio_qopt *mqprio = NULL;
unsigned long flags;
+ u32 taprio_flags;
ktime_t start;
int i, err;
@@ -1863,12 +1827,28 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
if (tb[TCA_TAPRIO_ATTR_PRIOMAP])
mqprio = nla_data(tb[TCA_TAPRIO_ATTR_PRIOMAP]);
- err = taprio_new_flags(tb[TCA_TAPRIO_ATTR_FLAGS],
- q->flags, extack);
- if (err < 0)
- return err;
+ /* The semantics of the 'flags' argument in relation to 'change()'
+ * requests, are interpreted following two rules (which are applied in
+ * this order): (1) an omitted 'flags' argument is interpreted as
+ * zero; (2) the 'flags' of a "running" taprio instance cannot be
+ * changed.
+ */
+ taprio_flags = tb[TCA_TAPRIO_ATTR_FLAGS] ? nla_get_u32(tb[TCA_TAPRIO_ATTR_FLAGS]) : 0;
- q->flags = err;
+ /* txtime-assist and full offload are mutually exclusive */
+ if ((taprio_flags & TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST) &&
+ (taprio_flags & TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD)) {
+ NL_SET_ERR_MSG_ATTR(extack, tb[TCA_TAPRIO_ATTR_FLAGS],
+ "TXTIME_ASSIST and FULL_OFFLOAD are mutually exclusive");
+ return -EINVAL;
+ }
+
+ if (q->flags != TAPRIO_FLAGS_INVALID && q->flags != taprio_flags) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Changing 'flags' of a running schedule is not supported");
+ return -EOPNOTSUPP;
+ }
+ q->flags = taprio_flags;
err = taprio_parse_mqprio_opt(dev, mqprio, extack, q->flags);
if (err < 0)
@@ -2548,6 +2528,7 @@ static struct Qdisc_ops taprio_qdisc_ops __read_mostly = {
.dump_stats = taprio_dump_stats,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("taprio");
static struct notifier_block taprio_device_notifier = {
.notifier_call = taprio_dev_notifier,
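
Net effect of the taprio flag rework: the removed taprio_flags_valid()/taprio_new_flags() pair collapses into the NLA_POLICY_MASK() policy entry plus two inline checks in taprio_change(). A conceptual summary (taprio_flags_ok() is a hypothetical helper, for illustration only):

static int taprio_flags_ok(u32 flags, u32 old)
{
	/* unknown bits: rejected by NLA_POLICY_MASK() during parsing,
	 * before the qdisc ever sees the attribute
	 */
	if (flags & ~TAPRIO_SUPPORTED_FLAGS)
		return -EINVAL;

	/* txtime-assist vs. full offload: still open-coded above */
	if ((flags & TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST) &&
	    (flags & TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD))
		return -EINVAL;

	/* a running instance keeps its flags: still open-coded above */
	if (old != TAPRIO_FLAGS_INVALID && old != flags)
		return -EOPNOTSUPP;

	return 0;
}

One behavioural nuance: the policy rejects unknown bits with an attribute-precise extack error during parsing, whereas the old code returned -EINVAL from deeper inside taprio_change().
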
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index dd6b1a723bf7..f1d09183ae63 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -608,6 +608,7 @@ static struct Qdisc_ops tbf_qdisc_ops __read_mostly = {
.dump = tbf_dump,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("tbf");
static int __init tbf_module_init(void)
{
diff --git a/net/sctp/diag.c b/net/sctp/diag.c
index eb05131ff1dd..23359e522273 100644
--- a/net/sctp/diag.c
+++ b/net/sctp/diag.c
@@ -507,6 +507,7 @@ done:
}
static const struct inet_diag_handler sctp_diag_handler = {
+ .owner = THIS_MODULE,
.dump = sctp_diag_dump,
.dump_one = sctp_diag_dump_one,
.idiag_get_info = sctp_diag_get_info,
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 94c6dd53cd62..e849f368ed91 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -1495,17 +1495,11 @@ static __init int sctp_init(void)
/* Allocate bind_bucket and chunk caches. */
status = -ENOBUFS;
- sctp_bucket_cachep = kmem_cache_create("sctp_bind_bucket",
- sizeof(struct sctp_bind_bucket),
- 0, SLAB_HWCACHE_ALIGN,
- NULL);
+ sctp_bucket_cachep = KMEM_CACHE(sctp_bind_bucket, SLAB_HWCACHE_ALIGN);
if (!sctp_bucket_cachep)
goto out;
- sctp_chunk_cachep = kmem_cache_create("sctp_chunk",
- sizeof(struct sctp_chunk),
- 0, SLAB_HWCACHE_ALIGN,
- NULL);
+ sctp_chunk_cachep = KMEM_CACHE(sctp_chunk, SLAB_HWCACHE_ALIGN);
if (!sctp_chunk_cachep)
goto err_chunk_cachep;
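
KMEM_CACHE() is the long-standing slab helper that derives the cache name, object size, and alignment from the struct itself (include/linux/slab.h):

#define KMEM_CACHE(__struct, __flags)					\
	kmem_cache_create(#__struct, sizeof(struct __struct),		\
			__alignof__(struct __struct), (__flags), NULL)

The cache names are unchanged ("sctp_bind_bucket" and "sctp_chunk" already matched the struct names); the alignment argument goes from 0 to __alignof__(), which with SLAB_HWCACHE_ALIGN should make no practical difference here.
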
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 6b9fcdb0952a..c67679a41044 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -67,6 +67,7 @@
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
#include <net/sctp/stream_sched.h>
+#include <net/rps.h>
/* Forward declarations for internal helper functions. */
static bool sctp_writeable(const struct sock *sk);
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 0f53a5c6fd9d..4b52b3b159c0 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -1046,7 +1046,7 @@ static int smc_find_ism_v2_device_clnt(struct smc_sock *smc,
int rc = SMC_CLC_DECL_NOSMCDDEV;
struct smcd_dev *smcd;
int i = 1, entry = 1;
- bool is_virtual;
+ bool is_emulated;
u16 chid;
if (smcd_indicated(ini->smc_type_v1))
@@ -1058,12 +1058,12 @@ static int smc_find_ism_v2_device_clnt(struct smc_sock *smc,
chid = smc_ism_get_chid(smcd);
if (!smc_find_ism_v2_is_unique_chid(chid, ini, i))
continue;
- is_virtual = __smc_ism_is_virtual(chid);
+ is_emulated = __smc_ism_is_emulated(chid);
if (!smc_pnet_is_pnetid_set(smcd->pnetid) ||
smc_pnet_is_ndev_pnetid(sock_net(&smc->sk), smcd->pnetid)) {
- if (is_virtual && entry == SMCD_CLC_MAX_V2_GID_ENTRIES)
+ if (is_emulated && entry == SMCD_CLC_MAX_V2_GID_ENTRIES)
/* It's the last GID-CHID entry left in CLC
- * Proposal SMC-Dv2 extension, but a virtual
+ * Proposal SMC-Dv2 extension, but an Emulated-
* ISM device will take two entries. So give
* up it and try the next potential ISM device.
*/
@@ -1073,7 +1073,7 @@ static int smc_find_ism_v2_device_clnt(struct smc_sock *smc,
ini->is_smcd = true;
rc = 0;
i++;
- entry = is_virtual ? entry + 2 : entry + 1;
+ entry = is_emulated ? entry + 2 : entry + 1;
if (entry > SMCD_CLC_MAX_V2_GID_ENTRIES)
break;
}
@@ -1414,10 +1414,10 @@ static int smc_connect_ism(struct smc_sock *smc,
if (rc)
return rc;
- if (__smc_ism_is_virtual(ini->ism_chid[ini->ism_selected]))
+ if (__smc_ism_is_emulated(ini->ism_chid[ini->ism_selected]))
ini->ism_peer_gid[ini->ism_selected].gid_ext =
ntohll(aclc->d1.gid_ext);
- /* for non-virtual ISM devices, peer gid_ext remains 0. */
+ /* for non-Emulated-ISM devices, peer gid_ext remains 0. */
}
ini->ism_peer_gid[ini->ism_selected].gid = ntohll(aclc->d0.gid);
@@ -2118,10 +2118,10 @@ static void smc_check_ism_v2_match(struct smc_init_info *ini,
if (smc_ism_get_chid(smcd) == proposed_chid &&
!smc_ism_cantalk(proposed_gid, ISM_RESERVED_VLANID, smcd)) {
ini->ism_peer_gid[*matches].gid = proposed_gid->gid;
- if (__smc_ism_is_virtual(proposed_chid))
+ if (__smc_ism_is_emulated(proposed_chid))
ini->ism_peer_gid[*matches].gid_ext =
proposed_gid->gid_ext;
- /* non-virtual ISM's peer gid_ext remains 0. */
+ /* non-Emulated-ISM's peer gid_ext remains 0. */
ini->ism_dev[*matches] = smcd;
(*matches)++;
break;
@@ -2171,10 +2171,10 @@ static void smc_find_ism_v2_device_serv(struct smc_sock *new_smc,
smcd_gid.gid = ntohll(smcd_v2_ext->gidchid[i].gid);
smcd_gid.gid_ext = 0;
chid = ntohs(smcd_v2_ext->gidchid[i].chid);
- if (__smc_ism_is_virtual(chid)) {
+ if (__smc_ism_is_emulated(chid)) {
if ((i + 1) == smc_v2_ext->hdr.ism_gid_cnt ||
chid != ntohs(smcd_v2_ext->gidchid[i + 1].chid))
- /* each virtual ISM device takes two GID-CHID
+ /* each Emulated-ISM device takes two GID-CHID
* entries and CHID of the second entry repeats
* that of the first entry.
*
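
The entry accounting above is the crux of the client-side change: with SMCD_CLC_MAX_V2_GID_ENTRIES == 8, an emulated device needs two consecutive GID-CHID slots and so cannot start in the last one. A commented walk-through:

/* Slot accounting, illustration:
 *   conventional ISM:	entry += 1	(one GID-CHID pair)
 *   emulated ISM:	entry += 2	(GID + GID-extension entry,
 *					 CHID repeated)
 * entry is 1-based, so entry == 8 means exactly one slot is left and
 * an emulated device (needing slots 8 and 9) is skipped in favour of
 * the next candidate.
 */
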
diff --git a/net/smc/smc.h b/net/smc/smc.h
index df64efd2dee8..18c8b7870198 100644
--- a/net/smc/smc.h
+++ b/net/smc/smc.h
@@ -56,11 +56,11 @@ enum smc_state { /* possible states of an SMC socket */
};
enum smc_supplemental_features {
- SMC_SPF_VIRT_ISM_DEV = 0,
+ SMC_SPF_EMULATED_ISM_DEV = 0,
};
#define SMC_FEATURE_MASK \
- (BIT(SMC_SPF_VIRT_ISM_DEV))
+ (BIT(SMC_SPF_EMULATED_ISM_DEV))
struct smc_link_group;
diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c
index 9a13709bea1c..e55026c7529c 100644
--- a/net/smc/smc_clc.c
+++ b/net/smc/smc_clc.c
@@ -952,8 +952,8 @@ int smc_clc_send_proposal(struct smc_sock *smc, struct smc_init_info *ini)
gidchids[entry].chid =
htons(smc_ism_get_chid(ini->ism_dev[i]));
gidchids[entry].gid = htonll(smcd_gid.gid);
- if (smc_ism_is_virtual(smcd)) {
- /* a virtual ISM device takes two
+ if (smc_ism_is_emulated(smcd)) {
+ /* an Emulated-ISM device takes two
* entries. CHID of the second entry
* repeats that of the first entry.
*/
@@ -1055,7 +1055,7 @@ smcd_clc_prep_confirm_accept(struct smc_connection *conn,
clc->d1.chid = htons(chid);
if (eid && eid[0])
memcpy(clc->d1.eid, eid, SMC_MAX_EID_LEN);
- if (__smc_ism_is_virtual(chid))
+ if (__smc_ism_is_emulated(chid))
clc->d1.gid_ext = htonll(smcd_gid.gid_ext);
len = SMCD_CLC_ACCEPT_CONFIRM_LEN_V2;
if (first_contact) {
diff --git a/net/smc/smc_clc.h b/net/smc/smc_clc.h
index a9f9bdd26dcd..7cc7070b9772 100644
--- a/net/smc/smc_clc.h
+++ b/net/smc/smc_clc.h
@@ -175,7 +175,7 @@ struct smc_clc_msg_proposal { /* clc proposal message sent by Linux */
#define SMCD_CLC_MAX_V2_GID_ENTRIES 8 /* max # of CHID-GID entries in CLC
* proposal SMC-Dv2 extension.
* each ISM device takes one entry and
- * each virtual ISM takes two entries.
+ * each Emulated-ISM takes two entries
*/
struct smc_clc_msg_proposal_area {
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index e4c858411207..9b84d5897aa5 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -1535,7 +1535,7 @@ void smc_smcd_terminate(struct smcd_dev *dev, struct smcd_gid *peer_gid,
list_for_each_entry_safe(lgr, l, &dev->lgr_list, list) {
if ((!peer_gid->gid ||
(lgr->peer_gid.gid == peer_gid->gid &&
- !smc_ism_is_virtual(dev) ? 1 :
+ !smc_ism_is_emulated(dev) ? 1 :
lgr->peer_gid.gid_ext == peer_gid->gid_ext)) &&
(vlan == VLAN_VID_MASK || lgr->vlan_id == vlan)) {
if (peer_gid->gid) /* peer triggered termination */
@@ -1881,7 +1881,7 @@ static bool smcd_lgr_match(struct smc_link_group *lgr,
lgr->smcd != smcismdev)
return false;
- if (smc_ism_is_virtual(smcismdev) &&
+ if (smc_ism_is_emulated(smcismdev) &&
lgr->peer_gid.gid_ext != peer_gid->gid_ext)
return false;
diff --git a/net/smc/smc_diag.c b/net/smc/smc_diag.c
index 5a33908015f3..6fdb2d96777a 100644
--- a/net/smc/smc_diag.c
+++ b/net/smc/smc_diag.c
@@ -255,6 +255,7 @@ static int smc_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
}
static const struct sock_diag_handler smc_diag_handler = {
+ .owner = THIS_MODULE,
.family = AF_SMC,
.dump = smc_diag_handler_dump,
};
diff --git a/net/smc/smc_ism.h b/net/smc/smc_ism.h
index ffff40c30a06..165cd013404b 100644
--- a/net/smc/smc_ism.h
+++ b/net/smc/smc_ism.h
@@ -15,7 +15,7 @@
#include "smc.h"
-#define SMC_VIRTUAL_ISM_CHID_MASK 0xFF00
+#define SMC_EMULATED_ISM_CHID_MASK 0xFF00
#define SMC_ISM_IDENT_MASK 0x00FFFF
struct smcd_dev_list { /* List of SMCD devices */
@@ -66,10 +66,10 @@ static inline int smc_ism_write(struct smcd_dev *smcd, u64 dmb_tok,
return rc < 0 ? rc : 0;
}
-static inline bool __smc_ism_is_virtual(u16 chid)
+static inline bool __smc_ism_is_emulated(u16 chid)
{
/* CHIDs in range of 0xFF00 to 0xFFFF are reserved
- * for virtual ISM device.
+ * for Emulated-ISM device.
*
* loopback-ism: 0xFFFF
* virtio-ism: 0xFF00 ~ 0xFFFE
@@ -77,11 +77,11 @@ static inline bool __smc_ism_is_virtual(u16 chid)
return ((chid & 0xFF00) == 0xFF00);
}
-static inline bool smc_ism_is_virtual(struct smcd_dev *smcd)
+static inline bool smc_ism_is_emulated(struct smcd_dev *smcd)
{
u16 chid = smcd->ops->get_chid(smcd);
- return __smc_ism_is_virtual(chid);
+ return __smc_ism_is_emulated(chid);
}
#endif
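
Concretely, the renamed predicate still just tests the top byte of the CHID:

/* Illustration, values per the comment above: */
__smc_ism_is_emulated(0xFFFF);	/* true:  loopback-ism		*/
__smc_ism_is_emulated(0xFF42);	/* true:  virtio-ism range	*/
__smc_ism_is_emulated(0x1001);	/* false: conventional ISM	*/
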
diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c
index 9f2c58c5a86b..2adb92b8c469 100644
--- a/net/smc/smc_pnet.c
+++ b/net/smc/smc_pnet.c
@@ -806,6 +806,16 @@ static void smc_pnet_create_pnetids_list(struct net *net)
u8 ndev_pnetid[SMC_MAX_PNETID_LEN];
struct net_device *dev;
+ /* Newly created netns do not have devices.
+ * Do not even acquire rtnl.
+ */
+ if (list_empty(&net->dev_base_head))
+ return;
+
+ /* Note: This might not be needed, because smc_pnet_netdev_event()
+ * is also calling smc_pnet_add_base_pnetid() when handling
+ * NETDEV_UP event.
+ */
rtnl_lock();
for_each_netdev(net, dev)
smc_pnet_add_base_pnetid(net, dev, ndev_pnetid);
diff --git a/net/socket.c b/net/socket.c
index ed3df2f749bf..7e9c8fc9a5b4 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -343,7 +343,7 @@ static void init_inodecache(void)
0,
(SLAB_HWCACHE_ALIGN |
SLAB_RECLAIM_ACCOUNT |
- SLAB_MEM_SPREAD | SLAB_ACCOUNT),
+ SLAB_ACCOUNT),
init_once);
BUG_ON(sock_inode_cachep == NULL);
}
diff --git a/net/tipc/Kconfig b/net/tipc/Kconfig
index be1c4003d67d..bb0d71eb02a6 100644
--- a/net/tipc/Kconfig
+++ b/net/tipc/Kconfig
@@ -32,16 +32,17 @@ config TIPC_MEDIA_UDP
bool "IP/UDP media type support"
depends on TIPC
select NET_UDP_TUNNEL
+ default y
help
Saying Y here will enable support for running TIPC over IP/UDP
- bool
- default y
+
config TIPC_CRYPTO
bool "TIPC encryption support"
depends on TIPC
select CRYPTO
select CRYPTO_AES
select CRYPTO_GCM
+ default y
help
Saying Y here will enable support for TIPC encryption.
All TIPC messages will be encrypted/decrypted by using the currently most
@@ -49,8 +50,6 @@ config TIPC_CRYPTO
entering the TIPC stack.
Key setting from user-space is performed via netlink by a user program
(e.g. the iproute2 'tipc' tool).
- bool
- default y
config TIPC_DIAG
tristate "TIPC: socket monitoring interface"
diff --git a/net/tipc/Makefile b/net/tipc/Makefile
index ee49a9f1dd4f..18e1636aa036 100644
--- a/net/tipc/Makefile
+++ b/net/tipc/Makefile
@@ -18,5 +18,5 @@ tipc-$(CONFIG_TIPC_MEDIA_IB) += ib_media.o
tipc-$(CONFIG_SYSCTL) += sysctl.o
tipc-$(CONFIG_TIPC_CRYPTO) += crypto.o
-
-obj-$(CONFIG_TIPC_DIAG) += diag.o
+obj-$(CONFIG_TIPC_DIAG) += tipc_diag.o
+tipc_diag-y += diag.o
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 878415c43527..5a526ebafeb4 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -1079,30 +1079,27 @@ int tipc_nl_bearer_add(struct sk_buff *skb, struct genl_info *info)
rtnl_lock();
b = tipc_bearer_find(net, name);
if (!b) {
- rtnl_unlock();
NL_SET_ERR_MSG(info->extack, "Bearer not found");
- return -EINVAL;
+ err = -EINVAL;
+ goto out;
}
#ifdef CONFIG_TIPC_MEDIA_UDP
if (attrs[TIPC_NLA_BEARER_UDP_OPTS]) {
if (b->media->type_id != TIPC_MEDIA_TYPE_UDP) {
- rtnl_unlock();
NL_SET_ERR_MSG(info->extack, "UDP option is unsupported");
- return -EINVAL;
+ err = -EINVAL;
+ goto out;
}
err = tipc_udp_nl_bearer_add(b,
attrs[TIPC_NLA_BEARER_UDP_OPTS]);
- if (err) {
- rtnl_unlock();
- return err;
- }
}
#endif
+out:
rtnl_unlock();
- return 0;
+ return err;
}
int __tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info)
diff --git a/net/tipc/diag.c b/net/tipc/diag.c
index 18733451c9e0..54dde8c4e4d4 100644
--- a/net/tipc/diag.c
+++ b/net/tipc/diag.c
@@ -95,6 +95,7 @@ static int tipc_sock_diag_handler_dump(struct sk_buff *skb,
}
static const struct sock_diag_handler tipc_sock_diag_handler = {
+ .owner = THIS_MODULE,
.family = AF_TIPC,
.dump = tipc_sock_diag_handler_dump,
};
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 3105abe97bb9..c1e890a82434 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -86,8 +86,6 @@ struct tipc_bclink_entry {
* @lock: rwlock governing access to structure
* @net: the applicable net namespace
* @hash: links to adjacent nodes in unsorted hash chain
- * @inputq: pointer to input queue containing messages for msg event
- * @namedq: pointer to name table input queue with name table messages
* @active_links: bearer ids of active links, used as index into links[] array
* @links: array containing references to all links to node
* @bc_entry: broadcast link entry
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index bb1118d02f95..7e4135db5816 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -80,7 +80,6 @@ struct sockaddr_pair {
* @phdr: preformatted message header used when sending messages
* @cong_links: list of congested links
* @publications: list of publications for port
- * @blocking_link: address of the congested link we are currently sleeping on
* @pub_count: total # of publications port has made during its lifetime
* @conn_timeout: the time we can wait for an unresponded setup request
* @probe_unacked: probe has not received ack yet
diff --git a/net/unix/Kconfig b/net/unix/Kconfig
index 28b232f281ab..8b5d04210d7c 100644
--- a/net/unix/Kconfig
+++ b/net/unix/Kconfig
@@ -16,11 +16,6 @@ config UNIX
Say Y unless you know what you are doing.
-config UNIX_SCM
- bool
- depends on UNIX
- default y
-
config AF_UNIX_OOB
bool
depends on UNIX
diff --git a/net/unix/Makefile b/net/unix/Makefile
index 20491825b4d0..4ddd125c4642 100644
--- a/net/unix/Makefile
+++ b/net/unix/Makefile
@@ -11,5 +11,3 @@ unix-$(CONFIG_BPF_SYSCALL) += unix_bpf.o
obj-$(CONFIG_UNIX_DIAG) += unix_diag.o
unix_diag-y := diag.o
-
-obj-$(CONFIG_UNIX_SCM) += scm.o
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 0748e7ea5210..5b41e2321209 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -118,8 +118,6 @@
#include <linux/btf_ids.h>
#include <linux/bpf-cgroup.h>
-#include "scm.h"
-
static atomic_long_t unix_nr_socks;
static struct hlist_head bsd_socket_buckets[UNIX_HASH_SIZE / 2];
static spinlock_t bsd_socket_locks[UNIX_HASH_SIZE / 2];
@@ -980,11 +978,11 @@ static struct sock *unix_create1(struct net *net, struct socket *sock, int kern,
sk->sk_write_space = unix_write_space;
sk->sk_max_ack_backlog = net->unx.sysctl_max_dgram_qlen;
sk->sk_destruct = unix_sock_destructor;
- u = unix_sk(sk);
+ u = unix_sk(sk);
+ u->inflight = 0;
u->path.dentry = NULL;
u->path.mnt = NULL;
spin_lock_init(&u->lock);
- atomic_long_set(&u->inflight, 0);
INIT_LIST_HEAD(&u->link);
mutex_init(&u->iolock); /* single task reading lock */
mutex_init(&u->bindlock); /* single task binding lock */
@@ -1775,6 +1773,52 @@ out:
return err;
}
+/* The "user->unix_inflight" variable is protected by the garbage
+ * collection lock, and we just read it locklessly here. If you go
+ * over the limit, there might be a tiny race in actually noticing
+ * it across threads. Tough.
+ */
+static inline bool too_many_unix_fds(struct task_struct *p)
+{
+ struct user_struct *user = current_user();
+
+ if (unlikely(READ_ONCE(user->unix_inflight) > task_rlimit(p, RLIMIT_NOFILE)))
+ return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
+ return false;
+}
+
+static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
+{
+ int i;
+
+ if (too_many_unix_fds(current))
+ return -ETOOMANYREFS;
+
+ /* Need to duplicate file references for the sake of garbage
+ * collection. Otherwise a socket in the fps might become a
+ * candidate for GC while the skb is not yet queued.
+ */
+ UNIXCB(skb).fp = scm_fp_dup(scm->fp);
+ if (!UNIXCB(skb).fp)
+ return -ENOMEM;
+
+ for (i = scm->fp->count - 1; i >= 0; i--)
+ unix_inflight(scm->fp->user, scm->fp->fp[i]);
+
+ return 0;
+}
+
+static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
+{
+ int i;
+
+ scm->fp = UNIXCB(skb).fp;
+ UNIXCB(skb).fp = NULL;
+
+ for (i = scm->fp->count - 1; i >= 0; i--)
+ unix_notinflight(scm->fp->user, scm->fp->fp[i]);
+}
+
static void unix_peek_fds(struct scm_cookie *scm, struct sk_buff *skb)
{
scm->fp = scm_fp_dup(UNIXCB(skb).fp);
@@ -1822,6 +1866,21 @@ static void unix_peek_fds(struct scm_cookie *scm, struct sk_buff *skb)
spin_unlock(&unix_gc_lock);
}
+static void unix_destruct_scm(struct sk_buff *skb)
+{
+ struct scm_cookie scm;
+
+ memset(&scm, 0, sizeof(scm));
+ scm.pid = UNIXCB(skb).pid;
+ if (UNIXCB(skb).fp)
+ unix_detach_fds(&scm, skb);
+
+ /* Alas, it calls VFS */
+ /* So fscking what? fput() had been SMP-safe since the last Summer */
+ scm_destroy(&scm);
+ sock_wfree(skb);
+}
+
static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
{
int err = 0;
@@ -1908,11 +1967,12 @@ static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
long timeo;
int err;
- wait_for_unix_gc();
err = scm_send(sock, msg, &scm, false);
if (err < 0)
return err;
+ wait_for_unix_gc(scm.fp);
+
err = -EOPNOTSUPP;
if (msg->msg_flags&MSG_OOB)
goto out;
@@ -2184,11 +2244,12 @@ static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
bool fds_sent = false;
int data_len;
- wait_for_unix_gc();
err = scm_send(sock, msg, &scm, false);
if (err < 0)
return err;
+ wait_for_unix_gc(scm.fp);
+
err = -EOPNOTSUPP;
if (msg->msg_flags & MSG_OOB) {
#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
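
The attach/detach/destruct helpers above are near-verbatim moves from net/unix/scm.c (deleted later in this diff), made static now that the io_uring special case is gone. A sketch of the send-side flow they implement (unix_send_fds_sketch() is a hypothetical wrapper, illustration only):

static int unix_send_fds_sketch(struct scm_cookie *scm, struct sk_buff *skb)
{
	int err;

	err = unix_attach_fds(scm, skb);	/* dup fps, mark in flight */
	if (err)
		return err;		/* -ETOOMANYREFS or -ENOMEM */

	/* ... skb is queued to the receiver; unix_detach_fds() unmarks
	 * the fds on delivery, and unix_destruct_scm() detaches them if
	 * the skb is destroyed unread.
	 */
	return 0;
}
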
diff --git a/net/unix/diag.c b/net/unix/diag.c
index be19827eca36..ae39538c5042 100644
--- a/net/unix/diag.c
+++ b/net/unix/diag.c
@@ -322,6 +322,7 @@ static int unix_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
}
static const struct sock_diag_handler unix_diag_handler = {
+ .owner = THIS_MODULE,
.family = AF_UNIX,
.dump = unix_diag_handler_dump,
};
diff --git a/net/unix/garbage.c b/net/unix/garbage.c
index 2a81880dac7b..fa39b6265238 100644
--- a/net/unix/garbage.c
+++ b/net/unix/garbage.c
@@ -81,12 +81,80 @@
#include <net/scm.h>
#include <net/tcp_states.h>
-#include "scm.h"
+struct unix_sock *unix_get_socket(struct file *filp)
+{
+ struct inode *inode = file_inode(filp);
+
+ /* Socket ? */
+ if (S_ISSOCK(inode->i_mode) && !(filp->f_mode & FMODE_PATH)) {
+ struct socket *sock = SOCKET_I(inode);
+ const struct proto_ops *ops;
+ struct sock *sk = sock->sk;
+
+ ops = READ_ONCE(sock->ops);
-/* Internal data structures and random procedures: */
+ /* PF_UNIX ? */
+ if (sk && ops && ops->family == PF_UNIX)
+ return unix_sk(sk);
+ }
+
+ return NULL;
+}
+DEFINE_SPINLOCK(unix_gc_lock);
+unsigned int unix_tot_inflight;
static LIST_HEAD(gc_candidates);
-static DECLARE_WAIT_QUEUE_HEAD(unix_gc_wait);
+static LIST_HEAD(gc_inflight_list);
+
+/* Keep the number of times in flight count for the file
+ * descriptor if it is for an AF_UNIX socket.
+ */
+void unix_inflight(struct user_struct *user, struct file *filp)
+{
+ struct unix_sock *u = unix_get_socket(filp);
+
+ spin_lock(&unix_gc_lock);
+
+ if (u) {
+ if (!u->inflight) {
+ WARN_ON_ONCE(!list_empty(&u->link));
+ list_add_tail(&u->link, &gc_inflight_list);
+ } else {
+ WARN_ON_ONCE(list_empty(&u->link));
+ }
+ u->inflight++;
+
+ /* Paired with READ_ONCE() in wait_for_unix_gc() */
+ WRITE_ONCE(unix_tot_inflight, unix_tot_inflight + 1);
+ }
+
+ WRITE_ONCE(user->unix_inflight, user->unix_inflight + 1);
+
+ spin_unlock(&unix_gc_lock);
+}
+
+void unix_notinflight(struct user_struct *user, struct file *filp)
+{
+ struct unix_sock *u = unix_get_socket(filp);
+
+ spin_lock(&unix_gc_lock);
+
+ if (u) {
+ WARN_ON_ONCE(!u->inflight);
+ WARN_ON_ONCE(list_empty(&u->link));
+
+ u->inflight--;
+ if (!u->inflight)
+ list_del_init(&u->link);
+
+ /* Paired with READ_ONCE() in wait_for_unix_gc() */
+ WRITE_ONCE(unix_tot_inflight, unix_tot_inflight - 1);
+ }
+
+ WRITE_ONCE(user->unix_inflight, user->unix_inflight - 1);
+
+ spin_unlock(&unix_gc_lock);
+}
static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
struct sk_buff_head *hitlist)
@@ -105,20 +173,15 @@ static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
while (nfd--) {
/* Get the socket the fd matches if it indeed does so */
- struct sock *sk = unix_get_socket(*fp++);
-
- if (sk) {
- struct unix_sock *u = unix_sk(sk);
+ struct unix_sock *u = unix_get_socket(*fp++);
- /* Ignore non-candidates, they could
- * have been added to the queues after
- * starting the garbage collection
- */
- if (test_bit(UNIX_GC_CANDIDATE, &u->gc_flags)) {
- hit = true;
+ /* Ignore non-candidates, they could have been added
+ * to the queues after starting the garbage collection
+ */
+ if (u && test_bit(UNIX_GC_CANDIDATE, &u->gc_flags)) {
+ hit = true;
- func(u);
- }
+ func(u);
}
}
if (hit && hitlist != NULL) {
@@ -151,7 +214,7 @@ static void scan_children(struct sock *x, void (*func)(struct unix_sock *),
/* An embryo cannot be in-flight, so it's safe
* to use the list link.
*/
- BUG_ON(!list_empty(&u->link));
+ WARN_ON_ONCE(!list_empty(&u->link));
list_add_tail(&u->link, &embryos);
}
spin_unlock(&x->sk_receive_queue.lock);
@@ -166,17 +229,18 @@ static void scan_children(struct sock *x, void (*func)(struct unix_sock *),
static void dec_inflight(struct unix_sock *usk)
{
- atomic_long_dec(&usk->inflight);
+ usk->inflight--;
}
static void inc_inflight(struct unix_sock *usk)
{
- atomic_long_inc(&usk->inflight);
+ usk->inflight++;
}
static void inc_inflight_move_tail(struct unix_sock *u)
{
- atomic_long_inc(&u->inflight);
+ u->inflight++;
+
/* If this still might be part of a cycle, move it to the end
* of the list, so that it's checked even if it was already
* passed over
@@ -186,40 +250,16 @@ static void inc_inflight_move_tail(struct unix_sock *u)
}
static bool gc_in_progress;
-#define UNIX_INFLIGHT_TRIGGER_GC 16000
-
-void wait_for_unix_gc(void)
-{
- /* If number of inflight sockets is insane,
- * force a garbage collect right now.
- * Paired with the WRITE_ONCE() in unix_inflight(),
- * unix_notinflight() and gc_in_progress().
- */
- if (READ_ONCE(unix_tot_inflight) > UNIX_INFLIGHT_TRIGGER_GC &&
- !READ_ONCE(gc_in_progress))
- unix_gc();
- wait_event(unix_gc_wait, gc_in_progress == false);
-}
-/* The external entry point: unix_gc() */
-void unix_gc(void)
+static void __unix_gc(struct work_struct *work)
{
- struct sk_buff *next_skb, *skb;
- struct unix_sock *u;
- struct unix_sock *next;
struct sk_buff_head hitlist;
- struct list_head cursor;
+ struct unix_sock *u, *next;
LIST_HEAD(not_cycle_list);
+ struct list_head cursor;
spin_lock(&unix_gc_lock);
- /* Avoid a recursive GC. */
- if (gc_in_progress)
- goto out;
-
- /* Paired with READ_ONCE() in wait_for_unix_gc(). */
- WRITE_ONCE(gc_in_progress, true);
-
/* First, select candidates for garbage collection. Only
* in-flight sockets are considered, and from those only ones
* which don't have any external reference.
@@ -237,14 +277,12 @@ void unix_gc(void)
*/
list_for_each_entry_safe(u, next, &gc_inflight_list, link) {
long total_refs;
- long inflight_refs;
total_refs = file_count(u->sk.sk_socket->file);
- inflight_refs = atomic_long_read(&u->inflight);
- BUG_ON(inflight_refs < 1);
- BUG_ON(total_refs < inflight_refs);
- if (total_refs == inflight_refs) {
+ WARN_ON_ONCE(!u->inflight);
+ WARN_ON_ONCE(total_refs < u->inflight);
+ if (total_refs == u->inflight) {
list_move_tail(&u->link, &gc_candidates);
__set_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
__set_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
@@ -271,7 +309,7 @@ void unix_gc(void)
/* Move cursor to after the current position. */
list_move(&cursor, &u->link);
- if (atomic_long_read(&u->inflight) > 0) {
+ if (u->inflight) {
list_move_tail(&u->link, &not_cycle_list);
__clear_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
scan_children(&u->sk, inc_inflight_move_tail, NULL);
@@ -306,38 +344,50 @@ void unix_gc(void)
spin_unlock(&unix_gc_lock);
- /* We need io_uring to clean its registered files, ignore all io_uring
- * originated skbs. It's fine as io_uring doesn't keep references to
- * other io_uring instances and so killing all other files in the cycle
- * will put all io_uring references forcing it to go through normal
- * release.path eventually putting registered files.
- */
- skb_queue_walk_safe(&hitlist, skb, next_skb) {
- if (skb->destructor == io_uring_destruct_scm) {
- __skb_unlink(skb, &hitlist);
- skb_queue_tail(&skb->sk->sk_receive_queue, skb);
- }
- }
-
/* Here we are. Hitlist is filled. Die. */
__skb_queue_purge(&hitlist);
spin_lock(&unix_gc_lock);
- /* There could be io_uring registered files, just push them back to
- * the inflight list
- */
- list_for_each_entry_safe(u, next, &gc_candidates, link)
- list_move_tail(&u->link, &gc_inflight_list);
-
/* All candidates should have been detached by now. */
- BUG_ON(!list_empty(&gc_candidates));
+ WARN_ON_ONCE(!list_empty(&gc_candidates));
/* Paired with READ_ONCE() in wait_for_unix_gc(). */
WRITE_ONCE(gc_in_progress, false);
- wake_up(&unix_gc_wait);
-
- out:
spin_unlock(&unix_gc_lock);
}
+
+static DECLARE_WORK(unix_gc_work, __unix_gc);
+
+void unix_gc(void)
+{
+ WRITE_ONCE(gc_in_progress, true);
+ queue_work(system_unbound_wq, &unix_gc_work);
+}
+
+#define UNIX_INFLIGHT_TRIGGER_GC 16000
+#define UNIX_INFLIGHT_SANE_USER (SCM_MAX_FD * 8)
+
+void wait_for_unix_gc(struct scm_fp_list *fpl)
+{
+ /* If number of inflight sockets is insane,
+ * force a garbage collect right now.
+ *
+ * Paired with the WRITE_ONCE() in unix_inflight(),
+ * unix_notinflight(), and __unix_gc().
+ */
+ if (READ_ONCE(unix_tot_inflight) > UNIX_INFLIGHT_TRIGGER_GC &&
+ !READ_ONCE(gc_in_progress))
+ unix_gc();
+
+ /* Penalise users who want to send AF_UNIX sockets
+ * but whose sockets have not been received yet.
+ */
+ if (!fpl || !fpl->count_unix ||
+ READ_ONCE(fpl->user->unix_inflight) < UNIX_INFLIGHT_SANE_USER)
+ return;
+
+ if (READ_ONCE(gc_in_progress))
+ flush_work(&unix_gc_work);
+}
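
Taken together: u->inflight can drop from atomic_long_t to a plain counter because every update now happens under unix_gc_lock, the collector runs asynchronously on system_unbound_wq, and only abusive senders wait for it synchronously. Assuming SCM_MAX_FD's long-standing value of 253, the numbers work out as:

/* Trigger/throttle summary of wait_for_unix_gc() above:
 *
 *   UNIX_INFLIGHT_TRIGGER_GC	16000 total in-flight AF_UNIX sockets
 *				kick off an async collection
 *   UNIX_INFLIGHT_SANE_USER	SCM_MAX_FD * 8 == 253 * 8 == 2024
 *				undelivered fds from one user before
 *				that user's sends block in flush_work()
 *
 * Ordinary senders no longer sleep on the removed unix_gc_wait queue.
 */
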
diff --git a/net/unix/scm.c b/net/unix/scm.c
deleted file mode 100644
index 822ce0d0d791..000000000000
--- a/net/unix/scm.c
+++ /dev/null
@@ -1,159 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/socket.h>
-#include <linux/net.h>
-#include <linux/fs.h>
-#include <net/af_unix.h>
-#include <net/scm.h>
-#include <linux/init.h>
-#include <linux/io_uring.h>
-
-#include "scm.h"
-
-unsigned int unix_tot_inflight;
-EXPORT_SYMBOL(unix_tot_inflight);
-
-LIST_HEAD(gc_inflight_list);
-EXPORT_SYMBOL(gc_inflight_list);
-
-DEFINE_SPINLOCK(unix_gc_lock);
-EXPORT_SYMBOL(unix_gc_lock);
-
-struct sock *unix_get_socket(struct file *filp)
-{
- struct sock *u_sock = NULL;
- struct inode *inode = file_inode(filp);
-
- /* Socket ? */
- if (S_ISSOCK(inode->i_mode) && !(filp->f_mode & FMODE_PATH)) {
- struct socket *sock = SOCKET_I(inode);
- const struct proto_ops *ops = READ_ONCE(sock->ops);
- struct sock *s = sock->sk;
-
- /* PF_UNIX ? */
- if (s && ops && ops->family == PF_UNIX)
- u_sock = s;
- }
-
- return u_sock;
-}
-EXPORT_SYMBOL(unix_get_socket);
-
-/* Keep the number of times in flight count for the file
- * descriptor if it is for an AF_UNIX socket.
- */
-void unix_inflight(struct user_struct *user, struct file *fp)
-{
- struct sock *s = unix_get_socket(fp);
-
- spin_lock(&unix_gc_lock);
-
- if (s) {
- struct unix_sock *u = unix_sk(s);
-
- if (atomic_long_inc_return(&u->inflight) == 1) {
- BUG_ON(!list_empty(&u->link));
- list_add_tail(&u->link, &gc_inflight_list);
- } else {
- BUG_ON(list_empty(&u->link));
- }
- /* Paired with READ_ONCE() in wait_for_unix_gc() */
- WRITE_ONCE(unix_tot_inflight, unix_tot_inflight + 1);
- }
- WRITE_ONCE(user->unix_inflight, user->unix_inflight + 1);
- spin_unlock(&unix_gc_lock);
-}
-
-void unix_notinflight(struct user_struct *user, struct file *fp)
-{
- struct sock *s = unix_get_socket(fp);
-
- spin_lock(&unix_gc_lock);
-
- if (s) {
- struct unix_sock *u = unix_sk(s);
-
- BUG_ON(!atomic_long_read(&u->inflight));
- BUG_ON(list_empty(&u->link));
-
- if (atomic_long_dec_and_test(&u->inflight))
- list_del_init(&u->link);
- /* Paired with READ_ONCE() in wait_for_unix_gc() */
- WRITE_ONCE(unix_tot_inflight, unix_tot_inflight - 1);
- }
- WRITE_ONCE(user->unix_inflight, user->unix_inflight - 1);
- spin_unlock(&unix_gc_lock);
-}
-
-/*
- * The "user->unix_inflight" variable is protected by the garbage
- * collection lock, and we just read it locklessly here. If you go
- * over the limit, there might be a tiny race in actually noticing
- * it across threads. Tough.
- */
-static inline bool too_many_unix_fds(struct task_struct *p)
-{
- struct user_struct *user = current_user();
-
- if (unlikely(READ_ONCE(user->unix_inflight) > task_rlimit(p, RLIMIT_NOFILE)))
- return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
- return false;
-}
-
-int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
-{
- int i;
-
- if (too_many_unix_fds(current))
- return -ETOOMANYREFS;
-
- /*
- * Need to duplicate file references for the sake of garbage
- * collection. Otherwise a socket in the fps might become a
- * candidate for GC while the skb is not yet queued.
- */
- UNIXCB(skb).fp = scm_fp_dup(scm->fp);
- if (!UNIXCB(skb).fp)
- return -ENOMEM;
-
- for (i = scm->fp->count - 1; i >= 0; i--)
- unix_inflight(scm->fp->user, scm->fp->fp[i]);
- return 0;
-}
-EXPORT_SYMBOL(unix_attach_fds);
-
-void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
-{
- int i;
-
- scm->fp = UNIXCB(skb).fp;
- UNIXCB(skb).fp = NULL;
-
- for (i = scm->fp->count-1; i >= 0; i--)
- unix_notinflight(scm->fp->user, scm->fp->fp[i]);
-}
-EXPORT_SYMBOL(unix_detach_fds);
-
-void unix_destruct_scm(struct sk_buff *skb)
-{
- struct scm_cookie scm;
-
- memset(&scm, 0, sizeof(scm));
- scm.pid = UNIXCB(skb).pid;
- if (UNIXCB(skb).fp)
- unix_detach_fds(&scm, skb);
-
- /* Alas, it calls VFS */
- /* So fscking what? fput() had been SMP-safe since the last Summer */
- scm_destroy(&scm);
- sock_wfree(skb);
-}
-EXPORT_SYMBOL(unix_destruct_scm);
-
-void io_uring_destruct_scm(struct sk_buff *skb)
-{
- unix_destruct_scm(skb);
-}
-EXPORT_SYMBOL(io_uring_destruct_scm);
diff --git a/net/unix/scm.h b/net/unix/scm.h
deleted file mode 100644
index 5a255a477f16..000000000000
--- a/net/unix/scm.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef NET_UNIX_SCM_H
-#define NET_UNIX_SCM_H
-
-extern struct list_head gc_inflight_list;
-extern spinlock_t unix_gc_lock;
-
-int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb);
-void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb);
-
-#endif
diff --git a/net/vmw_vsock/diag.c b/net/vmw_vsock/diag.c
index 2e29994f92ff..ab87ef66c1e8 100644
--- a/net/vmw_vsock/diag.c
+++ b/net/vmw_vsock/diag.c
@@ -157,6 +157,7 @@ static int vsock_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
}
static const struct sock_diag_handler vsock_diag_handler = {
+ .owner = THIS_MODULE,
.family = AF_VSOCK,
.dump = vsock_diag_handler_dump,
};
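
Each sock_diag handler touched by this diff (sctp, smc, tipc, unix, vsock) gains .owner = THIS_MODULE. The assumed intent, with the core-side change not shown here, is to let sock_diag pin the handler's module for the duration of a dump instead of serializing on a registration mutex; roughly (hypothetical helper, sketch only):

static int sock_diag_dump_pinned(int family, struct sk_buff *skb,
				 struct nlmsghdr *nlh)
{
	const struct sock_diag_handler *h;
	int err = -ENOENT;

	h = READ_ONCE(sock_diag_handlers[family]);
	if (h && !try_module_get(h->owner))
		h = NULL;			/* module mid-unload */
	if (h) {
		err = h->dump(skb, nlh);
		module_put(h->owner);
	}
	return err;
}
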
diff --git a/net/wireless/chan.c b/net/wireless/chan.c
index ceb9174c5c3d..3414b2c3abcc 100644
--- a/net/wireless/chan.c
+++ b/net/wireless/chan.c
@@ -6,7 +6,7 @@
*
* Copyright 2009 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
- * Copyright 2018-2023 Intel Corporation
+ * Copyright 2018-2024 Intel Corporation
*/
#include <linux/export.h>
@@ -27,11 +27,10 @@ void cfg80211_chandef_create(struct cfg80211_chan_def *chandef,
if (WARN_ON(!chan))
return;
- chandef->chan = chan;
- chandef->freq1_offset = chan->freq_offset;
- chandef->center_freq2 = 0;
- chandef->edmg.bw_config = 0;
- chandef->edmg.channels = 0;
+ *chandef = (struct cfg80211_chan_def) {
+ .chan = chan,
+ .freq1_offset = chan->freq_offset,
+ };
switch (chan_type) {
case NL80211_CHAN_NO_HT:
@@ -56,6 +55,73 @@ void cfg80211_chandef_create(struct cfg80211_chan_def *chandef,
}
EXPORT_SYMBOL(cfg80211_chandef_create);
+struct cfg80211_per_bw_puncturing_values {
+ u8 len;
+ const u16 *valid_values;
+};
+
+static const u16 puncturing_values_80mhz[] = {
+ 0x8, 0x4, 0x2, 0x1
+};
+
+static const u16 puncturing_values_160mhz[] = {
+ 0x80, 0x40, 0x20, 0x10, 0x8, 0x4, 0x2, 0x1, 0xc0, 0x30, 0xc, 0x3
+};
+
+static const u16 puncturing_values_320mhz[] = {
+ 0xc000, 0x3000, 0xc00, 0x300, 0xc0, 0x30, 0xc, 0x3, 0xf000, 0xf00,
+ 0xf0, 0xf, 0xfc00, 0xf300, 0xf0c0, 0xf030, 0xf00c, 0xf003, 0xc00f,
+ 0x300f, 0xc0f, 0x30f, 0xcf, 0x3f
+};
+
+#define CFG80211_PER_BW_VALID_PUNCTURING_VALUES(_bw) \
+ { \
+ .len = ARRAY_SIZE(puncturing_values_ ## _bw ## mhz), \
+ .valid_values = puncturing_values_ ## _bw ## mhz \
+ }
+
+static const struct cfg80211_per_bw_puncturing_values per_bw_puncturing[] = {
+ CFG80211_PER_BW_VALID_PUNCTURING_VALUES(80),
+ CFG80211_PER_BW_VALID_PUNCTURING_VALUES(160),
+ CFG80211_PER_BW_VALID_PUNCTURING_VALUES(320)
+};
+
+static bool valid_puncturing_bitmap(const struct cfg80211_chan_def *chandef)
+{
+ u32 idx, i, start_freq, primary_center = chandef->chan->center_freq;
+
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_80:
+ idx = 0;
+ start_freq = chandef->center_freq1 - 40;
+ break;
+ case NL80211_CHAN_WIDTH_160:
+ idx = 1;
+ start_freq = chandef->center_freq1 - 80;
+ break;
+ case NL80211_CHAN_WIDTH_320:
+ idx = 2;
+ start_freq = chandef->center_freq1 - 160;
+ break;
+ default:
+ return chandef->punctured == 0;
+ }
+
+ if (!chandef->punctured)
+ return true;
+
+ /* check if primary channel is punctured */
+ if (chandef->punctured & (u16)BIT((primary_center - start_freq) / 20))
+ return false;
+
+ for (i = 0; i < per_bw_puncturing[idx].len; i++) {
+ if (per_bw_puncturing[idx].valid_values[i] == chandef->punctured)
+ return true;
+ }
+
+ return false;
+}
+
static bool cfg80211_edmg_chandef_valid(const struct cfg80211_chan_def *chandef)
{
int max_contiguous = 0;
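
A worked example of valid_puncturing_bitmap() (hypothetical 80 MHz chandef, illustration only): primary channel 36 at 5180 MHz with center_freq1 = 5210 MHz.

/* start_freq	= 5210 - 40		= 5170
 * primary bit	= (5180 - 5170) / 20	= 0	-> BIT(0) == 0x1
 *
 * punctured == 0x1 is in puncturing_values_80mhz[] above, but here it
 * would puncture the primary channel and is rejected; with the primary
 * on channel 48 (5240 MHz, bit 3) instead, 0x1 passes both tests.
 */
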
@@ -317,72 +383,81 @@ bool cfg80211_chandef_valid(const struct cfg80211_chan_def *chandef)
!cfg80211_edmg_chandef_valid(chandef))
return false;
- return true;
+ return valid_puncturing_bitmap(chandef);
}
EXPORT_SYMBOL(cfg80211_chandef_valid);
-static void chandef_primary_freqs(const struct cfg80211_chan_def *c,
- u32 *pri40, u32 *pri80, u32 *pri160)
+int cfg80211_chandef_primary(const struct cfg80211_chan_def *c,
+ enum nl80211_chan_width primary_chan_width,
+ u16 *punctured)
{
- int tmp;
+ int pri_width = nl80211_chan_width_to_mhz(primary_chan_width);
+ int width = cfg80211_chandef_get_width(c);
+ u32 control = c->chan->center_freq;
+ u32 center = c->center_freq1;
+ u16 _punct = 0;
- switch (c->width) {
- case NL80211_CHAN_WIDTH_40:
- *pri40 = c->center_freq1;
- *pri80 = 0;
- *pri160 = 0;
- break;
- case NL80211_CHAN_WIDTH_80:
- case NL80211_CHAN_WIDTH_80P80:
- *pri160 = 0;
- *pri80 = c->center_freq1;
- /* n_P20 */
- tmp = (30 + c->chan->center_freq - c->center_freq1)/20;
- /* n_P40 */
- tmp /= 2;
- /* freq_P40 */
- *pri40 = c->center_freq1 - 20 + 40 * tmp;
- break;
- case NL80211_CHAN_WIDTH_160:
- *pri160 = c->center_freq1;
- /* n_P20 */
- tmp = (70 + c->chan->center_freq - c->center_freq1)/20;
- /* n_P40 */
- tmp /= 2;
- /* freq_P40 */
- *pri40 = c->center_freq1 - 60 + 40 * tmp;
- /* n_P80 */
- tmp /= 2;
- *pri80 = c->center_freq1 - 40 + 80 * tmp;
- break;
- case NL80211_CHAN_WIDTH_320:
- /* n_P20 */
- tmp = (150 + c->chan->center_freq - c->center_freq1) / 20;
- /* n_P40 */
- tmp /= 2;
- /* freq_P40 */
- *pri40 = c->center_freq1 - 140 + 40 * tmp;
- /* n_P80 */
- tmp /= 2;
- *pri80 = c->center_freq1 - 120 + 80 * tmp;
- /* n_P160 */
- tmp /= 2;
- *pri160 = c->center_freq1 - 80 + 160 * tmp;
- break;
- default:
- WARN_ON_ONCE(1);
+ if (WARN_ON_ONCE(pri_width < 0 || width < 0))
+ return -1;
+
+ /* not intended to be called this way: can't determine a primary wider than the chandef */
+ if (WARN_ON_ONCE(pri_width > width))
+ return -1;
+
+ if (!punctured)
+ punctured = &_punct;
+
+ *punctured = c->punctured;
+
+ while (width > pri_width) {
+ unsigned int bits_to_drop = width / 20 / 2;
+
+ if (control > center) {
+ center += width / 4;
+ *punctured >>= bits_to_drop;
+ } else {
+ center -= width / 4;
+ *punctured &= (1 << bits_to_drop) - 1;
+ }
+ width /= 2;
}
+
+ return center;
}
+EXPORT_SYMBOL(cfg80211_chandef_primary);
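A worked example of the halving loop (illustrative numbers, not from the patch): for a 160 MHz chandef with the control channel at 5180 and center_freq1 = 5250, asking for the primary 40 MHz:

    /* width 160: bits_to_drop = 4, control < center
     *            -> center = 5250 - 40 = 5210, punctured &= 0xf
     * width  80: bits_to_drop = 2, control < center
     *            -> center = 5210 - 20 = 5190, punctured &= 0x3
     * width  40: loop exits, returns 5190, the center of the
     *            5180 + 5200 primary 40 MHz pair
     */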
-const struct cfg80211_chan_def *
-cfg80211_chandef_compatible(const struct cfg80211_chan_def *c1,
- const struct cfg80211_chan_def *c2)
+static const struct cfg80211_chan_def *
+check_chandef_primary_compat(const struct cfg80211_chan_def *c1,
+ const struct cfg80211_chan_def *c2,
+ enum nl80211_chan_width primary_chan_width)
{
- u32 c1_pri40, c1_pri80, c2_pri40, c2_pri80, c1_pri160, c2_pri160;
+ u16 punct_c1 = 0, punct_c2 = 0;
+
+ /* check primary is compatible -> error if not */
+ if (cfg80211_chandef_primary(c1, primary_chan_width, &punct_c1) !=
+ cfg80211_chandef_primary(c2, primary_chan_width, &punct_c2))
+ return ERR_PTR(-EINVAL);
+
+ if (punct_c1 != punct_c2)
+ return ERR_PTR(-EINVAL);
+
+ /* c1 is assumed to be the narrower one; if its full width was just checked -> done */
+ if (c1->width == primary_chan_width)
+ return c2;
+
+ /* otherwise continue checking the next width */
+ return NULL;
+}
+
+static const struct cfg80211_chan_def *
+_cfg80211_chandef_compatible(const struct cfg80211_chan_def *c1,
+ const struct cfg80211_chan_def *c2)
+{
+ const struct cfg80211_chan_def *ret;
/* If they are identical, return */
if (cfg80211_chandef_identical(c1, c2))
- return c1;
+ return c2;
/* otherwise, must have same control channel */
if (c1->chan != c2->chan)
@@ -396,53 +471,76 @@ cfg80211_chandef_compatible(const struct cfg80211_chan_def *c1,
return NULL;
/*
- * can't be compatible if one of them is 5 or 10 MHz,
+ * can't be compatible if one of them is 5/10 MHz or S1G
* but they don't have the same width.
*/
- if (c1->width == NL80211_CHAN_WIDTH_5 ||
- c1->width == NL80211_CHAN_WIDTH_10 ||
- c2->width == NL80211_CHAN_WIDTH_5 ||
- c2->width == NL80211_CHAN_WIDTH_10)
+#define NARROW_OR_S1G(width) ((width) == NL80211_CHAN_WIDTH_5 || \
+ (width) == NL80211_CHAN_WIDTH_10 || \
+ (width) == NL80211_CHAN_WIDTH_1 || \
+ (width) == NL80211_CHAN_WIDTH_2 || \
+ (width) == NL80211_CHAN_WIDTH_4 || \
+ (width) == NL80211_CHAN_WIDTH_8 || \
+ (width) == NL80211_CHAN_WIDTH_16)
+
+ if (NARROW_OR_S1G(c1->width) || NARROW_OR_S1G(c2->width))
return NULL;
- if (c1->width == NL80211_CHAN_WIDTH_20_NOHT ||
- c1->width == NL80211_CHAN_WIDTH_20)
+ /*
+ * Make sure that c1 is always the narrower one, so that later
+ * we either return NULL or c2 and don't have to check both
+ * directions.
+ */
+ if (c1->width > c2->width)
+ swap(c1, c2);
+
+ /*
+ * No further checks needed if the "narrower" one is only 20 MHz.
+ * Here "narrower" includes being a 20 MHz non-HT channel vs. a
+ * 20 MHz HT (or later) one.
+ */
+ if (c1->width <= NL80211_CHAN_WIDTH_20)
return c2;
- if (c2->width == NL80211_CHAN_WIDTH_20_NOHT ||
- c2->width == NL80211_CHAN_WIDTH_20)
- return c1;
+ ret = check_chandef_primary_compat(c1, c2, NL80211_CHAN_WIDTH_40);
+ if (ret)
+ return ret;
- chandef_primary_freqs(c1, &c1_pri40, &c1_pri80, &c1_pri160);
- chandef_primary_freqs(c2, &c2_pri40, &c2_pri80, &c2_pri160);
+ ret = check_chandef_primary_compat(c1, c2, NL80211_CHAN_WIDTH_80);
+ if (ret)
+ return ret;
- if (c1_pri40 != c2_pri40)
+ /*
+ * If c1 is 80+80, then c2 is 160 or higher, but that cannot
+ * match. If c2 was also 80+80 it was already either accepted
+ * or rejected above (identical or not, respectively.)
+ */
+ if (c1->width == NL80211_CHAN_WIDTH_80P80)
return NULL;
- if (c1->width == NL80211_CHAN_WIDTH_40)
- return c2;
-
- if (c2->width == NL80211_CHAN_WIDTH_40)
- return c1;
+ ret = check_chandef_primary_compat(c1, c2, NL80211_CHAN_WIDTH_160);
+ if (ret)
+ return ret;
- if (c1_pri80 != c2_pri80)
- return NULL;
+ /*
+ * Getting here would mean they're both wider than 160, have the
+ * same primary 160, but are not identical - this cannot happen
+ * since they must both be 320 (no wider chandefs exist, at least not yet).
+ */
+ WARN_ON_ONCE(1);
- if (c1->width == NL80211_CHAN_WIDTH_80 &&
- c2->width > NL80211_CHAN_WIDTH_80)
- return c2;
+ return NULL;
+}
- if (c2->width == NL80211_CHAN_WIDTH_80 &&
- c1->width > NL80211_CHAN_WIDTH_80)
- return c1;
+const struct cfg80211_chan_def *
+cfg80211_chandef_compatible(const struct cfg80211_chan_def *c1,
+ const struct cfg80211_chan_def *c2)
+{
+ const struct cfg80211_chan_def *ret;
- WARN_ON(!c1_pri160 && !c2_pri160);
- if (c1_pri160 && c2_pri160 && c1_pri160 != c2_pri160)
+ ret = _cfg80211_chandef_compatible(c1, c2);
+ if (IS_ERR(ret))
return NULL;
-
- if (c1->width > c2->width)
- return c1;
- return c2;
+ return ret;
}
EXPORT_SYMBOL(cfg80211_chandef_compatible);
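A hedged usage sketch (not from this patch) of the refactored helper; the wrapper name is made up:

    /* Keep the wider of two compatible chandefs; -EBUSY if they
     * cannot coexist (different primaries, puncturing mismatch, ...).
     */
    static int example_merge_chandef(struct cfg80211_chan_def *cur,
                                     const struct cfg80211_chan_def *req)
    {
            const struct cfg80211_chan_def *compat;

            compat = cfg80211_chandef_compatible(cur, req);
            if (!compat)
                    return -EBUSY;

            *cur = *compat; /* always the wider (or identical) chandef */
            return 0;
    }

Note that the internal _cfg80211_chandef_compatible() distinguishes incompatibility via ERR_PTR(), but the exported wrapper maps that back to NULL for existing callers.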
@@ -1047,7 +1145,7 @@ EXPORT_SYMBOL(cfg80211_chandef_dfs_cac_time);
static bool cfg80211_secondary_chans_ok(struct wiphy *wiphy,
u32 center_freq, u32 bandwidth,
- u32 prohibited_flags)
+ u32 prohibited_flags, bool monitor)
{
struct ieee80211_channel *c;
u32 freq, start_freq, end_freq;
@@ -1057,7 +1155,11 @@ static bool cfg80211_secondary_chans_ok(struct wiphy *wiphy,
for (freq = start_freq; freq <= end_freq; freq += MHZ_TO_KHZ(20)) {
c = ieee80211_get_channel_khz(wiphy, freq);
- if (!c || c->flags & prohibited_flags)
+ if (!c)
+ return false;
+ if (monitor && c->flags & IEEE80211_CHAN_CAN_MONITOR)
+ continue;
+ if (c->flags & prohibited_flags)
return false;
}
@@ -1117,9 +1219,9 @@ static bool cfg80211_edmg_usable(struct wiphy *wiphy, u8 edmg_channels,
return true;
}
-bool cfg80211_chandef_usable(struct wiphy *wiphy,
- const struct cfg80211_chan_def *chandef,
- u32 prohibited_flags)
+bool _cfg80211_chandef_usable(struct wiphy *wiphy,
+ const struct cfg80211_chan_def *chandef,
+ u32 prohibited_flags, bool monitor)
{
struct ieee80211_sta_ht_cap *ht_cap;
struct ieee80211_sta_vht_cap *vht_cap;
@@ -1281,14 +1383,22 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
if (!cfg80211_secondary_chans_ok(wiphy,
ieee80211_chandef_to_khz(chandef),
- width, prohibited_flags))
+ width, prohibited_flags, monitor))
return false;
if (!chandef->center_freq2)
return true;
return cfg80211_secondary_chans_ok(wiphy,
MHZ_TO_KHZ(chandef->center_freq2),
- width, prohibited_flags);
+ width, prohibited_flags, monitor);
+}
+
+bool cfg80211_chandef_usable(struct wiphy *wiphy,
+ const struct cfg80211_chan_def *chandef,
+ u32 prohibited_flags)
+{
+ return _cfg80211_chandef_usable(wiphy, chandef, prohibited_flags,
+ false);
}
EXPORT_SYMBOL(cfg80211_chandef_usable);
@@ -1532,72 +1642,3 @@ struct cfg80211_chan_def *wdev_chandef(struct wireless_dev *wdev,
}
}
EXPORT_SYMBOL(wdev_chandef);
-
-struct cfg80211_per_bw_puncturing_values {
- u8 len;
- const u16 *valid_values;
-};
-
-static const u16 puncturing_values_80mhz[] = {
- 0x8, 0x4, 0x2, 0x1
-};
-
-static const u16 puncturing_values_160mhz[] = {
- 0x80, 0x40, 0x20, 0x10, 0x8, 0x4, 0x2, 0x1, 0xc0, 0x30, 0xc, 0x3
-};
-
-static const u16 puncturing_values_320mhz[] = {
- 0xc000, 0x3000, 0xc00, 0x300, 0xc0, 0x30, 0xc, 0x3, 0xf000, 0xf00,
- 0xf0, 0xf, 0xfc00, 0xf300, 0xf0c0, 0xf030, 0xf00c, 0xf003, 0xc00f,
- 0x300f, 0xc0f, 0x30f, 0xcf, 0x3f
-};
-
-#define CFG80211_PER_BW_VALID_PUNCTURING_VALUES(_bw) \
- { \
- .len = ARRAY_SIZE(puncturing_values_ ## _bw ## mhz), \
- .valid_values = puncturing_values_ ## _bw ## mhz \
- }
-
-static const struct cfg80211_per_bw_puncturing_values per_bw_puncturing[] = {
- CFG80211_PER_BW_VALID_PUNCTURING_VALUES(80),
- CFG80211_PER_BW_VALID_PUNCTURING_VALUES(160),
- CFG80211_PER_BW_VALID_PUNCTURING_VALUES(320)
-};
-
-bool cfg80211_valid_disable_subchannel_bitmap(u16 *bitmap,
- const struct cfg80211_chan_def *chandef)
-{
- u32 idx, i, start_freq;
-
- switch (chandef->width) {
- case NL80211_CHAN_WIDTH_80:
- idx = 0;
- start_freq = chandef->center_freq1 - 40;
- break;
- case NL80211_CHAN_WIDTH_160:
- idx = 1;
- start_freq = chandef->center_freq1 - 80;
- break;
- case NL80211_CHAN_WIDTH_320:
- idx = 2;
- start_freq = chandef->center_freq1 - 160;
- break;
- default:
- *bitmap = 0;
- break;
- }
-
- if (!*bitmap)
- return true;
-
- /* check if primary channel is punctured */
- if (*bitmap & (u16)BIT((chandef->chan->center_freq - start_freq) / 20))
- return false;
-
- for (i = 0; i < per_bw_puncturing[idx].len; i++)
- if (per_bw_puncturing[idx].valid_values[i] == *bitmap)
- return true;
-
- return false;
-}
-EXPORT_SYMBOL(cfg80211_valid_disable_subchannel_bitmap);
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 13657a85cf61..118f2f619828 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -3,7 +3,7 @@
* Wireless configuration interface internals.
*
* Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
#ifndef __NET_WIRELESS_CORE_H
#define __NET_WIRELESS_CORE_H
@@ -362,7 +362,8 @@ int cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
struct cfg80211_auth_request *req);
int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
struct net_device *dev,
- struct cfg80211_assoc_request *req);
+ struct cfg80211_assoc_request *req,
+ struct netlink_ext_ack *extack);
int cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev,
struct net_device *dev, const u8 *bssid,
const u8 *ie, int ie_len, u16 reason,
@@ -491,6 +492,9 @@ bool cfg80211_is_sub_chan(struct cfg80211_chan_def *chandef,
bool cfg80211_wdev_on_sub_chan(struct wireless_dev *wdev,
struct ieee80211_channel *chan,
bool primary_only);
+bool _cfg80211_chandef_usable(struct wiphy *wiphy,
+ const struct cfg80211_chan_def *chandef,
+ u32 prohibited_flags, bool monitor);
static inline unsigned int elapsed_jiffies_msecs(unsigned long start)
{
@@ -549,9 +553,53 @@ int cfg80211_remove_virtual_intf(struct cfg80211_registered_device *rdev,
struct wireless_dev *wdev);
void cfg80211_wdev_release_link_bsses(struct wireless_dev *wdev, u16 link_mask);
+/**
+ * struct cfg80211_colocated_ap - colocated AP information
+ *
+ * @list: linked list to all colocated APs
+ * @bssid: BSSID of the reported AP
+ * @ssid: SSID of the reported AP
+ * @ssid_len: length of the ssid
+ * @center_freq: frequency the reported AP is on
+ * @unsolicited_probe: the reported AP is part of an ESS, where all the APs
+ * that operate in the same channel as the reported AP and that might be
+ * detected by a STA receiving this frame, are transmitting unsolicited
+ * Probe Response frames every 20 TUs
+ * @oct_recommended: OCT is recommended to exchange MMPDUs with the reported AP
+ * @same_ssid: the reported AP has the same SSID as the reporting AP
+ * @multi_bss: the reported AP is part of a multiple BSSID set
+ * @transmitted_bssid: the reported AP is the transmitting BSSID
+ * @colocated_ess: all the APs that share the same ESS as the reported AP are
+ * colocated and can be discovered via legacy bands.
+ * @short_ssid_valid: short_ssid is valid and can be used
+ * @short_ssid: the short SSID for this SSID
+ * @psd_20: The 20MHz PSD EIRP of the primary 20MHz channel for the reported AP
+ */
+struct cfg80211_colocated_ap {
+ struct list_head list;
+ u8 bssid[ETH_ALEN];
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
+ size_t ssid_len;
+ u32 short_ssid;
+ u32 center_freq;
+ u8 unsolicited_probe:1,
+ oct_recommended:1,
+ same_ssid:1,
+ multi_bss:1,
+ transmitted_bssid:1,
+ colocated_ess:1,
+ short_ssid_valid:1;
+ s8 psd_20;
+};
+
#if IS_ENABLED(CONFIG_CFG80211_KUNIT_TEST)
#define EXPORT_SYMBOL_IF_CFG80211_KUNIT(sym) EXPORT_SYMBOL_IF_KUNIT(sym)
#define VISIBLE_IF_CFG80211_KUNIT
+void cfg80211_free_coloc_ap_list(struct list_head *coloc_ap_list);
+
+int cfg80211_parse_colocated_ap(const struct cfg80211_bss_ies *ies,
+ struct list_head *list);
+
size_t cfg80211_gen_new_ie(const u8 *ie, size_t ielen,
const u8 *subie, size_t subie_len,
u8 *new_ie, size_t new_ie_len);
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index f635a8b6ca2e..4052041a19ea 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -4,7 +4,7 @@
*
* Copyright (c) 2009, Jouni Malinen <j@w1.fi>
* Copyright (c) 2015 Intel Deutschland GmbH
- * Copyright (C) 2019-2020, 2022-2023 Intel Corporation
+ * Copyright (C) 2019-2020, 2022-2024 Intel Corporation
*/
#include <linux/kernel.h>
@@ -241,12 +241,12 @@ void cfg80211_michael_mic_failure(struct net_device *dev, const u8 *addr,
char *buf = kmalloc(128, gfp);
if (buf) {
- sprintf(buf, "MLME-MICHAELMICFAILURE.indication("
- "keyid=%d %scast addr=%pM)", key_id,
- key_type == NL80211_KEYTYPE_GROUP ? "broad" : "uni",
- addr);
memset(&wrqu, 0, sizeof(wrqu));
- wrqu.data.length = strlen(buf);
+ wrqu.data.length =
+ sprintf(buf, "MLME-MICHAELMICFAILURE."
+ "indication(keyid=%d %scast addr=%pM)",
+ key_id, key_type == NL80211_KEYTYPE_GROUP
+ ? "broad" : "uni", addr);
wireless_send_event(dev, IWEVCUSTOM, &wrqu, buf);
kfree(buf);
}
@@ -325,28 +325,136 @@ void cfg80211_oper_and_vht_capa(struct ieee80211_vht_cap *vht_capa,
p1[i] &= p2[i];
}
-/* Note: caller must cfg80211_put_bss() regardless of result */
-int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
- struct net_device *dev,
- struct cfg80211_assoc_request *req)
+static int
+cfg80211_mlme_check_mlo_compat(const struct ieee80211_multi_link_elem *mle_a,
+ const struct ieee80211_multi_link_elem *mle_b,
+ struct netlink_ext_ack *extack)
{
- struct wireless_dev *wdev = dev->ieee80211_ptr;
- int err, i, j;
+ const struct ieee80211_mle_basic_common_info *common_a, *common_b;
- lockdep_assert_wiphy(wdev->wiphy);
+ common_a = (const void *)mle_a->variable;
+ common_b = (const void *)mle_b->variable;
+
+ if (memcmp(common_a->mld_mac_addr, common_b->mld_mac_addr, ETH_ALEN)) {
+ NL_SET_ERR_MSG(extack, "AP MLD address mismatch");
+ return -EINVAL;
+ }
+
+ if (ieee80211_mle_get_eml_med_sync_delay((const u8 *)mle_a) !=
+ ieee80211_mle_get_eml_med_sync_delay((const u8 *)mle_b)) {
+ NL_SET_ERR_MSG(extack, "link EML medium sync delay mismatch");
+ return -EINVAL;
+ }
+
+ if (ieee80211_mle_get_eml_cap((const u8 *)mle_a) !=
+ ieee80211_mle_get_eml_cap((const u8 *)mle_b)) {
+ NL_SET_ERR_MSG(extack, "link EML capabilities mismatch");
+ return -EINVAL;
+ }
+
+ if (ieee80211_mle_get_mld_capa_op((const u8 *)mle_a) !=
+ ieee80211_mle_get_mld_capa_op((const u8 *)mle_b)) {
+ NL_SET_ERR_MSG(extack, "link MLD capabilities/ops mismatch");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int cfg80211_mlme_check_mlo(struct net_device *dev,
+ struct cfg80211_assoc_request *req,
+ struct netlink_ext_ack *extack)
+{
+ const struct ieee80211_multi_link_elem *mles[ARRAY_SIZE(req->links)] = {};
+ int i;
+
+ if (req->link_id < 0)
+ return 0;
+
+ if (!req->links[req->link_id].bss) {
+ NL_SET_ERR_MSG(extack, "no BSS for assoc link");
+ return -EINVAL;
+ }
+
+ rcu_read_lock();
+ for (i = 0; i < ARRAY_SIZE(req->links); i++) {
+ const struct cfg80211_bss_ies *ies;
+ const struct element *ml;
- for (i = 1; i < ARRAY_SIZE(req->links); i++) {
if (!req->links[i].bss)
continue;
- for (j = 0; j < i; j++) {
- if (req->links[i].bss == req->links[j].bss)
- return -EINVAL;
+
+ if (ether_addr_equal(req->links[i].bss->bssid, dev->dev_addr)) {
+ NL_SET_ERR_MSG(extack, "BSSID must not be our address");
+ req->links[i].error = -EINVAL;
+ goto error;
}
- if (ether_addr_equal(req->links[i].bss->bssid, dev->dev_addr))
- return -EINVAL;
+ ies = rcu_dereference(req->links[i].bss->ies);
+ ml = cfg80211_find_ext_elem(WLAN_EID_EXT_EHT_MULTI_LINK,
+ ies->data, ies->len);
+ if (!ml) {
+ NL_SET_ERR_MSG(extack, "MLO BSS w/o ML element");
+ req->links[i].error = -EINVAL;
+ goto error;
+ }
+
+ if (!ieee80211_mle_type_ok(ml->data + 1,
+ IEEE80211_ML_CONTROL_TYPE_BASIC,
+ ml->datalen - 1)) {
+ NL_SET_ERR_MSG(extack, "BSS with invalid ML element");
+ req->links[i].error = -EINVAL;
+ goto error;
+ }
+
+ mles[i] = (const void *)(ml->data + 1);
+
+ if (ieee80211_mle_get_link_id((const u8 *)mles[i]) != i) {
+ NL_SET_ERR_MSG(extack, "link ID mismatch");
+ req->links[i].error = -EINVAL;
+ goto error;
+ }
+ }
+
+ if (WARN_ON(!mles[req->link_id]))
+ goto error;
+
+ for (i = 0; i < ARRAY_SIZE(req->links); i++) {
+ if (i == req->link_id || !req->links[i].bss)
+ continue;
+
+ if (WARN_ON(!mles[i]))
+ goto error;
+
+ if (cfg80211_mlme_check_mlo_compat(mles[req->link_id], mles[i],
+ extack)) {
+ req->links[i].error = -EINVAL;
+ goto error;
+ }
}
+ rcu_read_unlock();
+ return 0;
+error:
+ rcu_read_unlock();
+ return -EINVAL;
+}
+
+/* Note: caller must cfg80211_put_bss() regardless of result */
+int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
+ struct net_device *dev,
+ struct cfg80211_assoc_request *req,
+ struct netlink_ext_ack *extack)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ int err;
+
+ lockdep_assert_wiphy(wdev->wiphy);
+
+ err = cfg80211_mlme_check_mlo(dev, req, extack);
+ if (err)
+ return err;
+
if (wdev->connected &&
(!req->prev_bssid ||
!ether_addr_equal(wdev->u.client.connected_addr, req->prev_bssid)))
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index bd54a928bab4..b4edba6b0b7b 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -5,7 +5,7 @@
* Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
#include <linux/if.h>
@@ -581,7 +581,11 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_PRIVACY] = { .type = NLA_FLAG },
[NL80211_ATTR_STATUS_CODE] = { .type = NLA_U16 },
[NL80211_ATTR_CIPHER_SUITE_GROUP] = { .type = NLA_U32 },
- [NL80211_ATTR_WPA_VERSIONS] = { .type = NLA_U32 },
+ [NL80211_ATTR_WPA_VERSIONS] =
+ NLA_POLICY_RANGE(NLA_U32, 0,
+ NL80211_WPA_VERSION_1 |
+ NL80211_WPA_VERSION_2 |
+ NL80211_WPA_VERSION_3),
[NL80211_ATTR_PID] = { .type = NLA_U32 },
[NL80211_ATTR_4ADDR] = { .type = NLA_U8 },
[NL80211_ATTR_PMKID] = NLA_POLICY_EXACT_LEN_WARN(WLAN_PMKID_LEN),
@@ -821,6 +825,7 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_BSS_DUMP_INCLUDE_USE_DATA] = { .type = NLA_FLAG },
[NL80211_ATTR_MLO_TTLM_DLINK] = NLA_POLICY_EXACT_LEN(sizeof(u16) * 8),
[NL80211_ATTR_MLO_TTLM_ULINK] = NLA_POLICY_EXACT_LEN(sizeof(u16) * 8),
+ [NL80211_ATTR_ASSOC_SPP_AMSDU] = { .type = NLA_FLAG },
};
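Since the three WPA version flags occupy consecutive bits (1, 2 and 4), the policy range 0..(V1|V2|V3) = 0..0x7 accepts exactly the valid bitmask combinations, which is what makes the open-coded helper removed further down in this patch redundant. A sketch of the equivalence:

    /* removed below as nl80211_valid_wpa_versions(); the policy
     * range check above accepts precisely the same value set
     */
    static bool valid_wpa_versions(u32 wpa_versions)
    {
            return !(wpa_versions & ~(NL80211_WPA_VERSION_1 |
                                      NL80211_WPA_VERSION_2 |
                                      NL80211_WPA_VERSION_3));
    }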
/* policy for the key attributes */
@@ -906,22 +911,11 @@ nl80211_rekey_policy[NUM_NL80211_REKEY_DATA] = {
};
static const struct nla_policy
-nl80211_match_band_rssi_policy[NUM_NL80211_BANDS] = {
- [NL80211_BAND_2GHZ] = { .type = NLA_S32 },
- [NL80211_BAND_5GHZ] = { .type = NLA_S32 },
- [NL80211_BAND_6GHZ] = { .type = NLA_S32 },
- [NL80211_BAND_60GHZ] = { .type = NLA_S32 },
- [NL80211_BAND_LC] = { .type = NLA_S32 },
-};
-
-static const struct nla_policy
nl80211_match_policy[NL80211_SCHED_SCAN_MATCH_ATTR_MAX + 1] = {
[NL80211_SCHED_SCAN_MATCH_ATTR_SSID] = { .type = NLA_BINARY,
.len = IEEE80211_MAX_SSID_LEN },
[NL80211_SCHED_SCAN_MATCH_ATTR_BSSID] = NLA_POLICY_EXACT_LEN_WARN(ETH_ALEN),
[NL80211_SCHED_SCAN_MATCH_ATTR_RSSI] = { .type = NLA_U32 },
- [NL80211_SCHED_SCAN_MATCH_PER_BAND_RSSI] =
- NLA_POLICY_NESTED(nl80211_match_band_rssi_policy),
};
static const struct nla_policy
@@ -1204,11 +1198,11 @@ static int nl80211_msg_put_channel(struct sk_buff *msg, struct wiphy *wiphy,
if ((chan->flags & IEEE80211_CHAN_DFS_CONCURRENT) &&
nla_put_flag(msg, NL80211_FREQUENCY_ATTR_DFS_CONCURRENT))
goto nla_put_failure;
- if ((chan->flags & IEEE80211_CHAN_NO_UHB_VLP_CLIENT) &&
- nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_UHB_VLP_CLIENT))
+ if ((chan->flags & IEEE80211_CHAN_NO_6GHZ_VLP_CLIENT) &&
+ nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_6GHZ_VLP_CLIENT))
goto nla_put_failure;
- if ((chan->flags & IEEE80211_CHAN_NO_UHB_AFC_CLIENT) &&
- nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_UHB_AFC_CLIENT))
+ if ((chan->flags & IEEE80211_CHAN_NO_6GHZ_AFC_CLIENT) &&
+ nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_6GHZ_AFC_CLIENT))
goto nla_put_failure;
}
@@ -3224,24 +3218,9 @@ static bool nl80211_can_set_dev_channel(struct wireless_dev *wdev)
wdev->iftype == NL80211_IFTYPE_P2P_GO;
}
-static int nl80211_parse_punct_bitmap(struct cfg80211_registered_device *rdev,
- struct genl_info *info,
- const struct cfg80211_chan_def *chandef,
- u16 *punct_bitmap)
-{
- if (!wiphy_ext_feature_isset(&rdev->wiphy, NL80211_EXT_FEATURE_PUNCT))
- return -EINVAL;
-
- *punct_bitmap = nla_get_u32(info->attrs[NL80211_ATTR_PUNCT_BITMAP]);
- if (!cfg80211_valid_disable_subchannel_bitmap(punct_bitmap, chandef))
- return -EINVAL;
-
- return 0;
-}
-
-int nl80211_parse_chandef(struct cfg80211_registered_device *rdev,
- struct genl_info *info,
- struct cfg80211_chan_def *chandef)
+static int _nl80211_parse_chandef(struct cfg80211_registered_device *rdev,
+ struct genl_info *info, bool monitor,
+ struct cfg80211_chan_def *chandef)
{
struct netlink_ext_ack *extack = info->extack;
struct nlattr **attrs = info->attrs;
@@ -3266,10 +3245,9 @@ int nl80211_parse_chandef(struct cfg80211_registered_device *rdev,
chandef->freq1_offset = control_freq % 1000;
chandef->center_freq2 = 0;
- /* Primary channel not allowed */
- if (!chandef->chan || chandef->chan->flags & IEEE80211_CHAN_DISABLED) {
+ if (!chandef->chan) {
NL_SET_ERR_MSG_ATTR(extack, attrs[NL80211_ATTR_WIPHY_FREQ],
- "Channel is disabled");
+ "Unknown channel");
return -EINVAL;
}
@@ -3346,13 +3324,27 @@ int nl80211_parse_chandef(struct cfg80211_registered_device *rdev,
chandef->edmg.channels = 0;
}
+ if (info->attrs[NL80211_ATTR_PUNCT_BITMAP]) {
+ chandef->punctured =
+ nla_get_u32(info->attrs[NL80211_ATTR_PUNCT_BITMAP]);
+
+ if (chandef->punctured &&
+ !wiphy_ext_feature_isset(&rdev->wiphy,
+ NL80211_EXT_FEATURE_PUNCT)) {
+ NL_SET_ERR_MSG(extack,
+ "driver doesn't support puncturing");
+ return -EINVAL;
+ }
+ }
+
if (!cfg80211_chandef_valid(chandef)) {
NL_SET_ERR_MSG(extack, "invalid channel definition");
return -EINVAL;
}
- if (!cfg80211_chandef_usable(&rdev->wiphy, chandef,
- IEEE80211_CHAN_DISABLED)) {
+ if (!_cfg80211_chandef_usable(&rdev->wiphy, chandef,
+ IEEE80211_CHAN_DISABLED,
+ monitor)) {
NL_SET_ERR_MSG(extack, "(extension) channel is disabled");
return -EINVAL;
}
@@ -3367,6 +3359,13 @@ int nl80211_parse_chandef(struct cfg80211_registered_device *rdev,
return 0;
}
+int nl80211_parse_chandef(struct cfg80211_registered_device *rdev,
+ struct genl_info *info,
+ struct cfg80211_chan_def *chandef)
+{
+ return _nl80211_parse_chandef(rdev, info, false, chandef);
+}
+
static int __nl80211_set_channel(struct cfg80211_registered_device *rdev,
struct net_device *dev,
struct genl_info *info,
@@ -3391,7 +3390,9 @@ static int __nl80211_set_channel(struct cfg80211_registered_device *rdev,
link_id = 0;
}
- result = nl80211_parse_chandef(rdev, info, &chandef);
+ result = _nl80211_parse_chandef(rdev, info,
+ iftype == NL80211_IFTYPE_MONITOR,
+ &chandef);
if (result)
return result;
@@ -3822,6 +3823,10 @@ int nl80211_send_chandef(struct sk_buff *msg, const struct cfg80211_chan_def *ch
if (chandef->center_freq2 &&
nla_put_u32(msg, NL80211_ATTR_CENTER_FREQ2, chandef->center_freq2))
return -ENOBUFS;
+ if (chandef->punctured &&
+ nla_put_u32(msg, NL80211_ATTR_PUNCT_BITMAP, chandef->punctured))
+ return -ENOBUFS;
+
return 0;
}
EXPORT_SYMBOL(nl80211_send_chandef);
@@ -4202,8 +4207,6 @@ static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info)
if (netif_running(dev))
return -EBUSY;
- BUILD_BUG_ON(IEEE80211_MAX_SSID_LEN !=
- IEEE80211_MAX_MESH_ID_LEN);
wdev->u.mesh.id_up_len =
nla_len(info->attrs[NL80211_ATTR_MESH_ID]);
memcpy(wdev->u.mesh.id,
@@ -4309,8 +4312,6 @@ static int _nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
case NL80211_IFTYPE_MESH_POINT:
if (!info->attrs[NL80211_ATTR_MESH_ID])
break;
- BUILD_BUG_ON(IEEE80211_MAX_SSID_LEN !=
- IEEE80211_MAX_MESH_ID_LEN);
wdev->u.mesh.id_up_len =
nla_len(info->attrs[NL80211_ATTR_MESH_ID]);
memcpy(wdev->u.mesh.id,
@@ -6069,14 +6070,6 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
goto out;
}
- if (info->attrs[NL80211_ATTR_PUNCT_BITMAP]) {
- err = nl80211_parse_punct_bitmap(rdev, info,
- &params->chandef,
- &params->punct_bitmap);
- if (err)
- goto out;
- }
-
if (!cfg80211_reg_can_beacon_relax(&rdev->wiphy, &params->chandef,
wdev->iftype)) {
err = -EINVAL;
@@ -6876,7 +6869,7 @@ int cfg80211_check_station_change(struct wiphy *wiphy,
return -EINVAL;
/* When you run into this, adjust the code below for the new flag */
- BUILD_BUG_ON(NL80211_STA_FLAG_MAX != 7);
+ BUILD_BUG_ON(NL80211_STA_FLAG_MAX != 8);
switch (statype) {
case CFG80211_STA_MESH_PEER_KERNEL:
@@ -6936,6 +6929,8 @@ int cfg80211_check_station_change(struct wiphy *wiphy,
params->link_sta_params.he_capa ||
params->link_sta_params.eht_capa)
return -EINVAL;
+ if (params->sta_flags_mask & BIT(NL80211_STA_FLAG_SPP_AMSDU))
+ return -EINVAL;
}
if (statype != CFG80211_STA_AP_CLIENT &&
@@ -6959,7 +6954,8 @@ int cfg80211_check_station_change(struct wiphy *wiphy,
BIT(NL80211_STA_FLAG_ASSOCIATED) |
BIT(NL80211_STA_FLAG_SHORT_PREAMBLE) |
BIT(NL80211_STA_FLAG_WME) |
- BIT(NL80211_STA_FLAG_MFP)))
+ BIT(NL80211_STA_FLAG_MFP) |
+ BIT(NL80211_STA_FLAG_SPP_AMSDU)))
return -EINVAL;
/* but authenticated/associated only if driver handles it */
@@ -7518,7 +7514,7 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
return -EINVAL;
/* When you run into this, adjust the code below for the new flag */
- BUILD_BUG_ON(NL80211_STA_FLAG_MAX != 7);
+ BUILD_BUG_ON(NL80211_STA_FLAG_MAX != 8);
switch (dev->ieee80211_ptr->iftype) {
case NL80211_IFTYPE_AP:
@@ -7542,6 +7538,11 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
params.sta_flags_mask & auth_assoc)
return -EINVAL;
+ if (!wiphy_ext_feature_isset(&rdev->wiphy,
+ NL80211_EXT_FEATURE_SPP_AMSDU_SUPPORT) &&
+ params.sta_flags_mask & BIT(NL80211_STA_FLAG_SPP_AMSDU))
+ return -EINVAL;
+
/* Older userspace, or userspace wanting to be compatible with
* !NL80211_FEATURE_FULL_AP_CLIENT_STATE, will not set the auth
* and assoc flags in the mask, but assumes the station will be
@@ -7630,14 +7631,16 @@ static int nl80211_del_station(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
struct net_device *dev = info->user_ptr[1];
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
struct station_del_parameters params;
+ int link_id = nl80211_link_id_or_invalid(info->attrs);
memset(&params, 0, sizeof(params));
if (info->attrs[NL80211_ATTR_MAC])
params.mac = nla_data(info->attrs[NL80211_ATTR_MAC]);
- switch (dev->ieee80211_ptr->iftype) {
+ switch (wdev->iftype) {
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_AP_VLAN:
case NL80211_IFTYPE_MESH_POINT:
@@ -7678,6 +7681,17 @@ static int nl80211_del_station(struct sk_buff *skb, struct genl_info *info)
params.reason_code = WLAN_REASON_PREV_AUTH_NOT_VALID;
}
+ /* Link ID not expected in case of non-ML operation */
+ if (!wdev->valid_links && link_id != -1)
+ return -EINVAL;
+
+ /* If given, a valid link ID should be passed during MLO */
+ if (wdev->valid_links && link_id >= 0 &&
+ !(wdev->valid_links & BIT(link_id)))
+ return -EINVAL;
+
+ params.link_id = link_id;
+
return rdev_del_station(rdev, dev, &params);
}
@@ -9483,41 +9497,6 @@ nl80211_parse_sched_scan_plans(struct wiphy *wiphy, int n_plans,
return 0;
}
-static int
-nl80211_parse_sched_scan_per_band_rssi(struct wiphy *wiphy,
- struct cfg80211_match_set *match_sets,
- struct nlattr *tb_band_rssi,
- s32 rssi_thold)
-{
- struct nlattr *attr;
- int i, tmp, ret = 0;
-
- if (!wiphy_ext_feature_isset(wiphy,
- NL80211_EXT_FEATURE_SCHED_SCAN_BAND_SPECIFIC_RSSI_THOLD)) {
- if (tb_band_rssi)
- ret = -EOPNOTSUPP;
- else
- for (i = 0; i < NUM_NL80211_BANDS; i++)
- match_sets->per_band_rssi_thold[i] =
- NL80211_SCAN_RSSI_THOLD_OFF;
- return ret;
- }
-
- for (i = 0; i < NUM_NL80211_BANDS; i++)
- match_sets->per_band_rssi_thold[i] = rssi_thold;
-
- nla_for_each_nested(attr, tb_band_rssi, tmp) {
- enum nl80211_band band = nla_type(attr);
-
- if (band < 0 || band >= NUM_NL80211_BANDS)
- return -EINVAL;
-
- match_sets->per_band_rssi_thold[band] = nla_get_s32(attr);
- }
-
- return 0;
-}
-
static struct cfg80211_sched_scan_request *
nl80211_parse_sched_scan(struct wiphy *wiphy, struct wireless_dev *wdev,
struct nlattr **attrs, int max_match_sets)
@@ -9792,15 +9771,6 @@ nl80211_parse_sched_scan(struct wiphy *wiphy, struct wireless_dev *wdev,
if (rssi)
request->match_sets[i].rssi_thold =
nla_get_s32(rssi);
-
- /* Parse per band RSSI attribute */
- err = nl80211_parse_sched_scan_per_band_rssi(wiphy,
- &request->match_sets[i],
- tb[NL80211_SCHED_SCAN_MATCH_PER_BAND_RSSI],
- request->match_sets[i].rssi_thold);
- if (err)
- goto out_free;
-
i++;
}
@@ -10080,6 +10050,42 @@ static int nl80211_notify_radar_detection(struct sk_buff *skb,
return 0;
}
+static int nl80211_parse_counter_offsets(struct cfg80211_registered_device *rdev,
+ const u8 *data, size_t datalen,
+ int first_count, struct nlattr *attr,
+ const u16 **offsets, unsigned int *n_offsets)
+{
+ int i;
+
+ *n_offsets = 0;
+
+ if (!attr)
+ return 0;
+
+ if (!nla_len(attr) || (nla_len(attr) % sizeof(u16)))
+ return -EINVAL;
+
+ *n_offsets = nla_len(attr) / sizeof(u16);
+ if (rdev->wiphy.max_num_csa_counters &&
+ (*n_offsets > rdev->wiphy.max_num_csa_counters))
+ return -EINVAL;
+
+ *offsets = nla_data(attr);
+
+ /* sanity checks - counters should fit and be the same */
+ for (i = 0; i < *n_offsets; i++) {
+ u16 offset = (*offsets)[i];
+
+ if (offset >= datalen)
+ return -EINVAL;
+
+ if (first_count != -1 && data[offset] != first_count)
+ return -EINVAL;
+ }
+
+ return 0;
+}
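This helper consolidates three formerly open-coded validations; note that the first_count == -1 case (used later for NL80211_ATTR_CSA_C_OFFSETS_TX, where data is NULL) short-circuits before dereferencing data, so only the bounds check runs there. A libnl-style sketch of what userspace puts on the wire (attribute name is real, the offsets and msg variable are hypothetical):

    /* packed array of u16 offsets, each pointing at a countdown
     * counter byte inside the beacon tail
     */
    uint16_t offsets[] = { 42, 130 };   /* hypothetical positions */
    nla_put(msg, NL80211_ATTR_CNTDWN_OFFS_BEACON,
            sizeof(offsets), offsets);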
+
static int nl80211_channel_switch(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
@@ -10091,7 +10097,6 @@ static int nl80211_channel_switch(struct sk_buff *skb, struct genl_info *info)
int err;
bool need_new_beacon = false;
bool need_handle_dfs_flag = true;
- int len, i;
u32 cs_count;
if (!rdev->ops->channel_switch ||
@@ -10176,72 +10181,23 @@ static int nl80211_channel_switch(struct sk_buff *skb, struct genl_info *info)
goto free;
}
- len = nla_len(csa_attrs[NL80211_ATTR_CNTDWN_OFFS_BEACON]);
- if (!len || (len % sizeof(u16))) {
- err = -EINVAL;
+ err = nl80211_parse_counter_offsets(rdev, params.beacon_csa.tail,
+ params.beacon_csa.tail_len,
+ params.count,
+ csa_attrs[NL80211_ATTR_CNTDWN_OFFS_BEACON],
+ &params.counter_offsets_beacon,
+ &params.n_counter_offsets_beacon);
+ if (err)
goto free;
- }
- params.n_counter_offsets_beacon = len / sizeof(u16);
- if (rdev->wiphy.max_num_csa_counters &&
- (params.n_counter_offsets_beacon >
- rdev->wiphy.max_num_csa_counters)) {
- err = -EINVAL;
+ err = nl80211_parse_counter_offsets(rdev, params.beacon_csa.probe_resp,
+ params.beacon_csa.probe_resp_len,
+ params.count,
+ csa_attrs[NL80211_ATTR_CNTDWN_OFFS_PRESP],
+ &params.counter_offsets_presp,
+ &params.n_counter_offsets_presp);
+ if (err)
goto free;
- }
-
- params.counter_offsets_beacon =
- nla_data(csa_attrs[NL80211_ATTR_CNTDWN_OFFS_BEACON]);
-
- /* sanity checks - counters should fit and be the same */
- for (i = 0; i < params.n_counter_offsets_beacon; i++) {
- u16 offset = params.counter_offsets_beacon[i];
-
- if (offset >= params.beacon_csa.tail_len) {
- err = -EINVAL;
- goto free;
- }
-
- if (params.beacon_csa.tail[offset] != params.count) {
- err = -EINVAL;
- goto free;
- }
- }
-
- if (csa_attrs[NL80211_ATTR_CNTDWN_OFFS_PRESP]) {
- len = nla_len(csa_attrs[NL80211_ATTR_CNTDWN_OFFS_PRESP]);
- if (!len || (len % sizeof(u16))) {
- err = -EINVAL;
- goto free;
- }
-
- params.n_counter_offsets_presp = len / sizeof(u16);
- if (rdev->wiphy.max_num_csa_counters &&
- (params.n_counter_offsets_presp >
- rdev->wiphy.max_num_csa_counters)) {
- err = -EINVAL;
- goto free;
- }
-
- params.counter_offsets_presp =
- nla_data(csa_attrs[NL80211_ATTR_CNTDWN_OFFS_PRESP]);
-
- /* sanity checks - counters should fit and be the same */
- for (i = 0; i < params.n_counter_offsets_presp; i++) {
- u16 offset = params.counter_offsets_presp[i];
-
- if (offset >= params.beacon_csa.probe_resp_len) {
- err = -EINVAL;
- goto free;
- }
-
- if (params.beacon_csa.probe_resp[offset] !=
- params.count) {
- err = -EINVAL;
- goto free;
- }
- }
- }
skip_beacons:
err = nl80211_parse_chandef(rdev, info, &params.chandef);
@@ -10272,14 +10228,7 @@ skip_beacons:
if (info->attrs[NL80211_ATTR_CH_SWITCH_BLOCK_TX])
params.block_tx = true;
- if (info->attrs[NL80211_ATTR_PUNCT_BITMAP]) {
- err = nl80211_parse_punct_bitmap(rdev, info,
- &params.chandef,
- &params.punct_bitmap);
- if (err)
- goto free;
- }
-
+ params.link_id = link_id;
err = rdev_channel_switch(rdev, dev, &params);
free:
@@ -10652,13 +10601,6 @@ static int nl80211_dump_survey(struct sk_buff *skb, struct netlink_callback *cb)
return res;
}
-static bool nl80211_valid_wpa_versions(u32 wpa_versions)
-{
- return !(wpa_versions & ~(NL80211_WPA_VERSION_1 |
- NL80211_WPA_VERSION_2 |
- NL80211_WPA_VERSION_3));
-}
-
static int nl80211_authenticate(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
@@ -10884,12 +10826,9 @@ static int nl80211_crypto_settings(struct cfg80211_registered_device *rdev,
return -EINVAL;
}
- if (info->attrs[NL80211_ATTR_WPA_VERSIONS]) {
+ if (info->attrs[NL80211_ATTR_WPA_VERSIONS])
settings->wpa_versions =
nla_get_u32(info->attrs[NL80211_ATTR_WPA_VERSIONS]);
- if (!nl80211_valid_wpa_versions(settings->wpa_versions))
- return -EINVAL;
- }
if (info->attrs[NL80211_ATTR_AKM_SUITES]) {
void *data;
@@ -11104,6 +11043,15 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
sizeof(req.s1g_capa));
}
+ if (nla_get_flag(info->attrs[NL80211_ATTR_ASSOC_SPP_AMSDU])) {
+ if (!wiphy_ext_feature_isset(&rdev->wiphy,
+ NL80211_EXT_FEATURE_SPP_AMSDU_SUPPORT)) {
+ GENL_SET_ERR_MSG(info, "SPP A-MSDUs not supported");
+ return -EINVAL;
+ }
+ req.flags |= ASSOC_REQ_SPP_AMSDU;
+ }
+
req.link_id = nl80211_link_id_or_invalid(info->attrs);
if (info->attrs[NL80211_ATTR_MLO_LINKS]) {
@@ -11229,7 +11177,8 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
struct nlattr *link;
int rem = 0;
- err = cfg80211_mlme_assoc(rdev, dev, &req);
+ err = cfg80211_mlme_assoc(rdev, dev, &req,
+ info->extack);
if (!err && info->attrs[NL80211_ATTR_SOCKET_OWNER]) {
dev->ieee80211_ptr->conn_owner_nlportid =
@@ -12677,23 +12626,12 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
params.buf = nla_data(info->attrs[NL80211_ATTR_FRAME]);
params.len = nla_len(info->attrs[NL80211_ATTR_FRAME]);
- if (info->attrs[NL80211_ATTR_CSA_C_OFFSETS_TX]) {
- int len = nla_len(info->attrs[NL80211_ATTR_CSA_C_OFFSETS_TX]);
- int i;
-
- if (len % sizeof(u16))
- return -EINVAL;
-
- params.n_csa_offsets = len / sizeof(u16);
- params.csa_offsets =
- nla_data(info->attrs[NL80211_ATTR_CSA_C_OFFSETS_TX]);
-
- /* check that all the offsets fit the frame */
- for (i = 0; i < params.n_csa_offsets; i++) {
- if (params.csa_offsets[i] >= params.len)
- return -EINVAL;
- }
- }
+ err = nl80211_parse_counter_offsets(rdev, NULL, params.len, -1,
+ info->attrs[NL80211_ATTR_CSA_C_OFFSETS_TX],
+ &params.csa_offsets,
+ &params.n_csa_offsets);
+ if (err)
+ return err;
if (!params.dont_wait_for_ack) {
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
@@ -16830,6 +16768,10 @@ static const struct genl_small_ops nl80211_small_ops[] = {
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_del_station,
.flags = GENL_UNS_ADMIN_PERM,
+ /* cannot use NL80211_FLAG_MLO_VALID_LINK_ID here: whether a link
+ * ID is required depends on whether a MAC address is passed. If a
+ * MAC address is passed, a link ID is not required even during MLO.
+ */
.internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP),
},
{
@@ -19400,7 +19342,7 @@ static void nl80211_ch_switch_notify(struct cfg80211_registered_device *rdev,
struct cfg80211_chan_def *chandef,
gfp_t gfp,
enum nl80211_commands notif,
- u8 count, bool quiet, u16 punct_bitmap)
+ u8 count, bool quiet)
{
struct wireless_dev *wdev = netdev->ieee80211_ptr;
struct sk_buff *msg;
@@ -19434,9 +19376,6 @@ static void nl80211_ch_switch_notify(struct cfg80211_registered_device *rdev,
goto nla_put_failure;
}
- if (nla_put_u32(msg, NL80211_ATTR_PUNCT_BITMAP, punct_bitmap))
- goto nla_put_failure;
-
genlmsg_end(msg, hdr);
genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0,
@@ -19449,7 +19388,7 @@ static void nl80211_ch_switch_notify(struct cfg80211_registered_device *rdev,
void cfg80211_ch_switch_notify(struct net_device *dev,
struct cfg80211_chan_def *chandef,
- unsigned int link_id, u16 punct_bitmap)
+ unsigned int link_id)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct wiphy *wiphy = wdev->wiphy;
@@ -19458,7 +19397,7 @@ void cfg80211_ch_switch_notify(struct net_device *dev,
lockdep_assert_wiphy(wdev->wiphy);
WARN_INVALID_LINK_ID(wdev, link_id);
- trace_cfg80211_ch_switch_notify(dev, chandef, link_id, punct_bitmap);
+ trace_cfg80211_ch_switch_notify(dev, chandef, link_id);
switch (wdev->iftype) {
case NL80211_IFTYPE_STATION:
@@ -19487,15 +19426,14 @@ void cfg80211_ch_switch_notify(struct net_device *dev,
cfg80211_sched_dfs_chan_update(rdev);
nl80211_ch_switch_notify(rdev, dev, link_id, chandef, GFP_KERNEL,
- NL80211_CMD_CH_SWITCH_NOTIFY, 0, false,
- punct_bitmap);
+ NL80211_CMD_CH_SWITCH_NOTIFY, 0, false);
}
EXPORT_SYMBOL(cfg80211_ch_switch_notify);
void cfg80211_ch_switch_started_notify(struct net_device *dev,
struct cfg80211_chan_def *chandef,
unsigned int link_id, u8 count,
- bool quiet, u16 punct_bitmap)
+ bool quiet)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct wiphy *wiphy = wdev->wiphy;
@@ -19504,13 +19442,12 @@ void cfg80211_ch_switch_started_notify(struct net_device *dev,
lockdep_assert_wiphy(wdev->wiphy);
WARN_INVALID_LINK_ID(wdev, link_id);
- trace_cfg80211_ch_switch_started_notify(dev, chandef, link_id,
- punct_bitmap);
+ trace_cfg80211_ch_switch_started_notify(dev, chandef, link_id);
nl80211_ch_switch_notify(rdev, dev, link_id, chandef, GFP_KERNEL,
NL80211_CMD_CH_SWITCH_STARTED_NOTIFY,
- count, quiet, punct_bitmap);
+ count, quiet);
}
EXPORT_SYMBOL(cfg80211_ch_switch_started_notify);
@@ -19887,6 +19824,11 @@ void cfg80211_report_wowlan_wakeup(struct wireless_dev *wdev,
NL80211_WOWLAN_TRIG_WAKEUP_TCP_NOMORETOKENS))
goto free_msg;
+ if (wakeup->unprot_deauth_disassoc &&
+ nla_put_flag(msg,
+ NL80211_WOWLAN_TRIG_UNPROTECTED_DEAUTH_DISASSOC))
+ goto free_msg;
+
if (wakeup->packet) {
u32 pkt_attr = NL80211_WOWLAN_TRIG_WAKEUP_PKT_80211;
u32 len_attr = NL80211_WOWLAN_TRIG_WAKEUP_PKT_80211_LEN;
@@ -20167,9 +20109,26 @@ int cfg80211_external_auth_request(struct net_device *dev,
if (!hdr)
goto nla_put_failure;
+ /* Some historical mistakes in the drivers <-> userspace interface
+ * (notably between drivers and wpa_supplicant) led to a big-endian
+ * conversion being needed on NL80211_ATTR_AKM_SUITES _only_ when its
+ * value is WLAN_AKM_SUITE_SAE. This is now fixed on the userspace side,
+ * but for the benefit of older wpa_supplicant versions, send this
+ * particular value in big-endian. Newer wpa_supplicant versions still
+ * detect this value in big endian as well, so everything continues to
+ * work.
+ */
+ if (params->key_mgmt_suite == WLAN_AKM_SUITE_SAE) {
+ if (nla_put_be32(msg, NL80211_ATTR_AKM_SUITES,
+ cpu_to_be32(WLAN_AKM_SUITE_SAE)))
+ goto nla_put_failure;
+ } else {
+ if (nla_put_u32(msg, NL80211_ATTR_AKM_SUITES,
+ params->key_mgmt_suite))
+ goto nla_put_failure;
+ }
+
if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
- nla_put_u32(msg, NL80211_ATTR_AKM_SUITES, params->key_mgmt_suite) ||
nla_put_u32(msg, NL80211_ATTR_EXTERNAL_AUTH_ACTION,
params->action) ||
nla_put(msg, NL80211_ATTR_BSSID, ETH_ALEN, params->bssid) ||
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 2741b626919a..753f8e9aa4b1 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -5,7 +5,7 @@
* Copyright 2008-2011 Luis R. Rodriguez <mcgrof@qca.qualcomm.com>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2023 Intel Corporation
+ * Copyright (C) 2018 - 2024 Intel Corporation
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -57,6 +57,8 @@
#include <linux/verification.h>
#include <linux/moduleparam.h>
#include <linux/firmware.h>
+#include <linux/units.h>
+
#include <net/cfg80211.h>
#include "core.h"
#include "reg.h"
@@ -1289,20 +1291,17 @@ static bool is_valid_rd(const struct ieee80211_regdomain *rd)
static bool freq_in_rule_band(const struct ieee80211_freq_range *freq_range,
u32 freq_khz)
{
-#define ONE_GHZ_IN_KHZ 1000000
/*
* From 802.11ad: directional multi-gigabit (DMG):
* Pertaining to operation in a frequency band containing a channel
* with the Channel starting frequency above 45 GHz.
*/
- u32 limit = freq_khz > 45 * ONE_GHZ_IN_KHZ ?
- 20 * ONE_GHZ_IN_KHZ : 2 * ONE_GHZ_IN_KHZ;
+ u32 limit = freq_khz > 45 * KHZ_PER_GHZ ? 20 * KHZ_PER_GHZ : 2 * KHZ_PER_GHZ;
if (abs(freq_khz - freq_range->start_freq_khz) <= limit)
return true;
if (abs(freq_khz - freq_range->end_freq_khz) <= limit)
return true;
return false;
-#undef ONE_GHZ_IN_KHZ
}
/*
@@ -1595,10 +1594,10 @@ static u32 map_regdom_flags(u32 rd_flags)
channel_flags |= IEEE80211_CHAN_NO_EHT;
if (rd_flags & NL80211_RRF_DFS_CONCURRENT)
channel_flags |= IEEE80211_CHAN_DFS_CONCURRENT;
- if (rd_flags & NL80211_RRF_NO_UHB_VLP_CLIENT)
- channel_flags |= IEEE80211_CHAN_NO_UHB_VLP_CLIENT;
- if (rd_flags & NL80211_RRF_NO_UHB_AFC_CLIENT)
- channel_flags |= IEEE80211_CHAN_NO_UHB_AFC_CLIENT;
+ if (rd_flags & NL80211_RRF_NO_6GHZ_VLP_CLIENT)
+ channel_flags |= IEEE80211_CHAN_NO_6GHZ_VLP_CLIENT;
+ if (rd_flags & NL80211_RRF_NO_6GHZ_AFC_CLIENT)
+ channel_flags |= IEEE80211_CHAN_NO_6GHZ_AFC_CLIENT;
if (rd_flags & NL80211_RRF_PSD)
channel_flags |= IEEE80211_CHAN_PSD;
return channel_flags;
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 389a52c29bfc..5a5dd3ce497f 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -5,7 +5,7 @@
* Copyright 2008 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright 2016 Intel Deutschland GmbH
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
#include <linux/kernel.h>
#include <linux/slab.h>
@@ -77,45 +77,6 @@ MODULE_PARM_DESC(bss_entries_limit,
#define IEEE80211_SCAN_RESULT_EXPIRE (30 * HZ)
-/**
- * struct cfg80211_colocated_ap - colocated AP information
- *
- * @list: linked list to all colocated aPS
- * @bssid: BSSID of the reported AP
- * @ssid: SSID of the reported AP
- * @ssid_len: length of the ssid
- * @center_freq: frequency the reported AP is on
- * @unsolicited_probe: the reported AP is part of an ESS, where all the APs
- * that operate in the same channel as the reported AP and that might be
- * detected by a STA receiving this frame, are transmitting unsolicited
- * Probe Response frames every 20 TUs
- * @oct_recommended: OCT is recommended to exchange MMPDUs with the reported AP
- * @same_ssid: the reported AP has the same SSID as the reporting AP
- * @multi_bss: the reported AP is part of a multiple BSSID set
- * @transmitted_bssid: the reported AP is the transmitting BSSID
- * @colocated_ess: all the APs that share the same ESS as the reported AP are
- * colocated and can be discovered via legacy bands.
- * @short_ssid_valid: short_ssid is valid and can be used
- * @short_ssid: the short SSID for this SSID
- * @psd_20: The 20MHz PSD EIRP of the primary 20MHz channel for the reported AP
- */
-struct cfg80211_colocated_ap {
- struct list_head list;
- u8 bssid[ETH_ALEN];
- u8 ssid[IEEE80211_MAX_SSID_LEN];
- size_t ssid_len;
- u32 short_ssid;
- u32 center_freq;
- u8 unsolicited_probe:1,
- oct_recommended:1,
- same_ssid:1,
- multi_bss:1,
- transmitted_bssid:1,
- colocated_ess:1,
- short_ssid_valid:1;
- s8 psd_20;
-};
-
static void bss_free(struct cfg80211_internal_bss *bss)
{
struct cfg80211_bss_ies *ies;
@@ -566,7 +527,8 @@ static int cfg80211_calc_short_ssid(const struct cfg80211_bss_ies *ies,
return 0;
}
-static void cfg80211_free_coloc_ap_list(struct list_head *coloc_ap_list)
+VISIBLE_IF_CFG80211_KUNIT void
+cfg80211_free_coloc_ap_list(struct list_head *coloc_ap_list)
{
struct cfg80211_colocated_ap *ap, *tmp_ap;
@@ -575,6 +537,7 @@ static void cfg80211_free_coloc_ap_list(struct list_head *coloc_ap_list)
kfree(ap);
}
}
+EXPORT_SYMBOL_IF_CFG80211_KUNIT(cfg80211_free_coloc_ap_list);
static int cfg80211_parse_ap_info(struct cfg80211_colocated_ap *entry,
const u8 *pos, u8 length,
@@ -648,104 +611,140 @@ static int cfg80211_parse_ap_info(struct cfg80211_colocated_ap *entry,
return 0;
}
-static int cfg80211_parse_colocated_ap(const struct cfg80211_bss_ies *ies,
- struct list_head *list)
+bool cfg80211_iter_rnr(const u8 *elems, size_t elems_len,
+ enum cfg80211_rnr_iter_ret
+ (*iter)(void *data, u8 type,
+ const struct ieee80211_neighbor_ap_info *info,
+ const u8 *tbtt_info, u8 tbtt_info_len),
+ void *iter_data)
{
- struct ieee80211_neighbor_ap_info *ap_info;
- const struct element *elem, *ssid_elem;
+ const struct element *rnr;
const u8 *pos, *end;
- u32 s_ssid_tmp;
- int n_coloc = 0, ret;
- LIST_HEAD(ap_list);
- ret = cfg80211_calc_short_ssid(ies, &ssid_elem, &s_ssid_tmp);
- if (ret)
- return 0;
+ for_each_element_id(rnr, WLAN_EID_REDUCED_NEIGHBOR_REPORT,
+ elems, elems_len) {
+ const struct ieee80211_neighbor_ap_info *info;
- for_each_element_id(elem, WLAN_EID_REDUCED_NEIGHBOR_REPORT,
- ies->data, ies->len) {
- pos = elem->data;
- end = elem->data + elem->datalen;
+ pos = rnr->data;
+ end = rnr->data + rnr->datalen;
/* RNR IE may contain more than one NEIGHBOR_AP_INFO */
- while (pos + sizeof(*ap_info) <= end) {
- enum nl80211_band band;
- int freq;
+ while (sizeof(*info) <= end - pos) {
u8 length, i, count;
+ u8 type;
- ap_info = (void *)pos;
- count = u8_get_bits(ap_info->tbtt_info_hdr,
- IEEE80211_AP_INFO_TBTT_HDR_COUNT) + 1;
- length = ap_info->tbtt_info_len;
+ info = (void *)pos;
+ count = u8_get_bits(info->tbtt_info_hdr,
+ IEEE80211_AP_INFO_TBTT_HDR_COUNT) +
+ 1;
+ length = info->tbtt_info_len;
- pos += sizeof(*ap_info);
+ pos += sizeof(*info);
- if (!ieee80211_operating_class_to_band(ap_info->op_class,
- &band))
- break;
+ if (count * length > end - pos)
+ return false;
- freq = ieee80211_channel_to_frequency(ap_info->channel,
- band);
+ type = u8_get_bits(info->tbtt_info_hdr,
+ IEEE80211_AP_INFO_TBTT_HDR_TYPE);
- if (end - pos < count * length)
- break;
+ for (i = 0; i < count; i++) {
+ switch (iter(iter_data, type, info,
+ pos, length)) {
+ case RNR_ITER_CONTINUE:
+ break;
+ case RNR_ITER_BREAK:
+ return true;
+ case RNR_ITER_ERROR:
+ return false;
+ }
- if (u8_get_bits(ap_info->tbtt_info_hdr,
- IEEE80211_AP_INFO_TBTT_HDR_TYPE) !=
- IEEE80211_TBTT_INFO_TYPE_TBTT) {
- pos += count * length;
- continue;
+ pos += length;
}
+ }
- /* TBTT info must include bss param + BSSID +
- * (short SSID or same_ssid bit to be set).
- * ignore other options, and move to the
- * next AP info
- */
- if (band != NL80211_BAND_6GHZ ||
- !(length == offsetofend(struct ieee80211_tbtt_info_7_8_9,
- bss_params) ||
- length == sizeof(struct ieee80211_tbtt_info_7_8_9) ||
- length >= offsetofend(struct ieee80211_tbtt_info_ge_11,
- bss_params))) {
- pos += count * length;
- continue;
- }
+ if (pos != end)
+ return false;
+ }
- for (i = 0; i < count; i++) {
- struct cfg80211_colocated_ap *entry;
+ return true;
+}
+EXPORT_SYMBOL_GPL(cfg80211_iter_rnr);
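A minimal sketch (not part of the patch) of a cfg80211_iter_rnr() user, following the iterator signature introduced above; the function names are made up:

    /* count TBTT-type entries across all RNR elements */
    static enum cfg80211_rnr_iter_ret
    example_count_tbtt_iter(void *data, u8 type,
                            const struct ieee80211_neighbor_ap_info *info,
                            const u8 *tbtt_info, u8 tbtt_info_len)
    {
            unsigned int *count = data;

            if (type == IEEE80211_TBTT_INFO_TYPE_TBTT)
                    (*count)++;
            return RNR_ITER_CONTINUE;       /* visit every entry */
    }

    /* returns false if an RNR element is malformed */
    static bool example_count_tbtt(const u8 *ies, size_t len,
                                   unsigned int *count)
    {
            *count = 0;
            return cfg80211_iter_rnr(ies, len,
                                     example_count_tbtt_iter, count);
    }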
+
+struct colocated_ap_data {
+ const struct element *ssid_elem;
+ struct list_head ap_list;
+ u32 s_ssid_tmp;
+ int n_coloc;
+};
- entry = kzalloc(sizeof(*entry) + IEEE80211_MAX_SSID_LEN,
- GFP_ATOMIC);
+static enum cfg80211_rnr_iter_ret
+cfg80211_parse_colocated_ap_iter(void *_data, u8 type,
+ const struct ieee80211_neighbor_ap_info *info,
+ const u8 *tbtt_info, u8 tbtt_info_len)
+{
+ struct colocated_ap_data *data = _data;
+ struct cfg80211_colocated_ap *entry;
+ enum nl80211_band band;
- if (!entry)
- goto error;
+ if (type != IEEE80211_TBTT_INFO_TYPE_TBTT)
+ return RNR_ITER_CONTINUE;
- entry->center_freq = freq;
+ if (!ieee80211_operating_class_to_band(info->op_class, &band))
+ return RNR_ITER_CONTINUE;
- if (!cfg80211_parse_ap_info(entry, pos, length,
- ssid_elem,
- s_ssid_tmp)) {
- n_coloc++;
- list_add_tail(&entry->list, &ap_list);
- } else {
- kfree(entry);
- }
+ /* TBTT info must include bss param + BSSID + (short SSID or
+ * same_ssid bit to be set). Ignore other options, and move to
+ * the next AP info
+ */
+ if (band != NL80211_BAND_6GHZ ||
+ !(tbtt_info_len == offsetofend(struct ieee80211_tbtt_info_7_8_9,
+ bss_params) ||
+ tbtt_info_len == sizeof(struct ieee80211_tbtt_info_7_8_9) ||
+ tbtt_info_len >= offsetofend(struct ieee80211_tbtt_info_ge_11,
+ bss_params)))
+ return RNR_ITER_CONTINUE;
+
+ entry = kzalloc(sizeof(*entry) + IEEE80211_MAX_SSID_LEN, GFP_ATOMIC);
+ if (!entry)
+ return RNR_ITER_ERROR;
+
+ entry->center_freq =
+ ieee80211_channel_to_frequency(info->channel, band);
+
+ if (!cfg80211_parse_ap_info(entry, tbtt_info, tbtt_info_len,
+ data->ssid_elem, data->s_ssid_tmp)) {
+ data->n_coloc++;
+ list_add_tail(&entry->list, &data->ap_list);
+ } else {
+ kfree(entry);
+ }
- pos += length;
- }
- }
+ return RNR_ITER_CONTINUE;
+}
-error:
- if (pos != end) {
- cfg80211_free_coloc_ap_list(&ap_list);
- return 0;
- }
+VISIBLE_IF_CFG80211_KUNIT int
+cfg80211_parse_colocated_ap(const struct cfg80211_bss_ies *ies,
+ struct list_head *list)
+{
+ struct colocated_ap_data data = {};
+ int ret;
+
+ INIT_LIST_HEAD(&data.ap_list);
+
+ ret = cfg80211_calc_short_ssid(ies, &data.ssid_elem, &data.s_ssid_tmp);
+ if (ret)
+ return 0;
+
+ if (!cfg80211_iter_rnr(ies->data, ies->len,
+ cfg80211_parse_colocated_ap_iter, &data)) {
+ cfg80211_free_coloc_ap_list(&data.ap_list);
+ return 0;
}
- list_splice_tail(&ap_list, list);
- return n_coloc;
+ list_splice_tail(&data.ap_list, list);
+ return data.n_coloc;
}
+EXPORT_SYMBOL_IF_CFG80211_KUNIT(cfg80211_parse_colocated_ap);
static void cfg80211_scan_req_add_chan(struct cfg80211_scan_request *request,
struct ieee80211_channel *chan,
@@ -2122,6 +2121,35 @@ struct cfg80211_inform_single_bss_data {
u64 cannot_use_reasons;
};
+static bool cfg80211_6ghz_power_type_valid(const u8 *ie, size_t ielen,
+ const u32 flags)
+{
+ const struct element *tmp;
+ struct ieee80211_he_operation *he_oper;
+
+ tmp = cfg80211_find_ext_elem(WLAN_EID_EXT_HE_OPERATION, ie, ielen);
+ if (tmp && tmp->datalen >= sizeof(*he_oper) + 1) {
+ const struct ieee80211_he_6ghz_oper *he_6ghz_oper;
+
+ he_oper = (void *)&tmp->data[1];
+ he_6ghz_oper = ieee80211_he_6ghz_oper(he_oper);
+
+ if (!he_6ghz_oper)
+ return false;
+
+ switch (u8_get_bits(he_6ghz_oper->control,
+ IEEE80211_HE_6GHZ_OPER_CTRL_REG_INFO)) {
+ case IEEE80211_6GHZ_CTRL_REG_LPI_AP:
+ return true;
+ case IEEE80211_6GHZ_CTRL_REG_SP_AP:
+ return !(flags & IEEE80211_CHAN_NO_6GHZ_AFC_CLIENT);
+ case IEEE80211_6GHZ_CTRL_REG_VLP_AP:
+ return !(flags & IEEE80211_CHAN_NO_6GHZ_VLP_CLIENT);
+ }
+ }
+ return false;
+}
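In short, the helper above only admits a 6 GHz BSS whose advertised regulatory power type the local channel flags allow; an illustrative summary of the switch:

    /* LPI AP -> always acceptable
     * SP  AP -> acceptable unless IEEE80211_CHAN_NO_6GHZ_AFC_CLIENT
     * VLP AP -> acceptable unless IEEE80211_CHAN_NO_6GHZ_VLP_CLIENT
     * missing/short HE 6 GHz operation info, or any other reg type
     *        -> not acceptable (the caller clears use_for)
     */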
+
/* Returned bss is reference counted and must be cleaned up appropriately. */
static struct cfg80211_bss *
cfg80211_inform_single_bss_data(struct wiphy *wiphy,
@@ -2154,6 +2182,14 @@ cfg80211_inform_single_bss_data(struct wiphy *wiphy,
if (!channel)
return NULL;
+ if (channel->band == NL80211_BAND_6GHZ &&
+ !cfg80211_6ghz_power_type_valid(data->ie, data->ielen,
+ channel->flags)) {
+ data->use_for = 0;
+ data->cannot_use_reasons =
+ NL80211_BSS_CANNOT_USE_6GHZ_PWR_MISMATCH;
+ }
+
memcpy(tmp.pub.bssid, data->bssid, ETH_ALEN);
tmp.pub.channel = channel;
if (data->bss_source != BSS_SOURCE_STA_PROFILE)
@@ -2165,6 +2201,9 @@ cfg80211_inform_single_bss_data(struct wiphy *wiphy,
tmp.ts_boottime = drv_data->boottime_ns;
tmp.parent_tsf = drv_data->parent_tsf;
ether_addr_copy(tmp.parent_bssid, drv_data->parent_bssid);
+ tmp.pub.chains = drv_data->chains;
+ memcpy(tmp.pub.chain_signal, drv_data->chain_signal,
+ IEEE80211_MAX_CHAINS);
tmp.pub.use_for = data->use_for;
tmp.pub.cannot_use_reasons = data->cannot_use_reasons;
@@ -2208,6 +2247,7 @@ cfg80211_inform_single_bss_data(struct wiphy *wiphy,
switch (data->ftype) {
case CFG80211_BSS_FTYPE_BEACON:
+ case CFG80211_BSS_FTYPE_S1G_BEACON:
ies->from_beacon = true;
fallthrough;
case CFG80211_BSS_FTYPE_UNKNOWN:
@@ -2464,16 +2504,22 @@ ssize_t cfg80211_defragment_element(const struct element *elem, const u8 *ies,
if (elem->id == WLAN_EID_EXTENSION) {
copied = elem->datalen - 1;
- if (copied > data_len)
- return -ENOSPC;
- memmove(data, elem->data + 1, copied);
+ if (data) {
+ if (copied > data_len)
+ return -ENOSPC;
+
+ memmove(data, elem->data + 1, copied);
+ }
} else {
copied = elem->datalen;
- if (copied > data_len)
- return -ENOSPC;
- memmove(data, elem->data, copied);
+ if (data) {
+ if (copied > data_len)
+ return -ENOSPC;
+
+ memmove(data, elem->data, copied);
+ }
}
/* Fragmented elements must have 255 bytes */
@@ -2492,10 +2538,13 @@ ssize_t cfg80211_defragment_element(const struct element *elem, const u8 *ies,
elem_datalen = elem->datalen;
- if (copied + elem_datalen > data_len)
- return -ENOSPC;
+ if (data) {
+ if (copied + elem_datalen > data_len)
+ return -ENOSPC;
+
+ memmove(data + copied, elem->data, elem_datalen);
+ }
- memmove(data + copied, elem->data, elem_datalen);
copied += elem_datalen;
/* Only the last fragment may be short */
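The NULL-data case added above enables a two-pass pattern: size first, then copy. A hedged sketch, assuming the cfg80211.h signature (elem, ies, ieslen, data, data_len, frag_id); buffer handling is simplified:

    ssize_t len;
    u8 *buf;

    /* pass 1: data == NULL just computes the defragmented length */
    len = cfg80211_defragment_element(elem, ies, ieslen,
                                      NULL, 0, WLAN_EID_FRAGMENT);
    if (len < 0)
            return len;

    buf = kzalloc(len, GFP_KERNEL);
    if (!buf)
            return -ENOMEM;

    /* pass 2: same call again with a right-sized buffer */
    len = cfg80211_defragment_element(elem, ies, ieslen,
                                      buf, len, WLAN_EID_FRAGMENT);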
@@ -2601,77 +2650,168 @@ error:
return NULL;
}
+struct tbtt_info_iter_data {
+ const struct ieee80211_neighbor_ap_info *ap_info;
+ u8 param_ch_count;
+ u32 use_for;
+ u8 mld_id, link_id;
+};
+
+static enum cfg80211_rnr_iter_ret
+cfg80211_mld_ap_rnr_iter(void *_data, u8 type,
+ const struct ieee80211_neighbor_ap_info *info,
+ const u8 *tbtt_info, u8 tbtt_info_len)
+{
+ const struct ieee80211_rnr_mld_params *mld_params;
+ struct tbtt_info_iter_data *data = _data;
+ u8 link_id;
+
+ if (type == IEEE80211_TBTT_INFO_TYPE_TBTT &&
+ tbtt_info_len >= offsetofend(struct ieee80211_tbtt_info_ge_11,
+ mld_params))
+ mld_params = (void *)(tbtt_info +
+ offsetof(struct ieee80211_tbtt_info_ge_11,
+ mld_params));
+ else if (type == IEEE80211_TBTT_INFO_TYPE_MLD &&
+ tbtt_info_len >= sizeof(struct ieee80211_rnr_mld_params))
+ mld_params = (void *)tbtt_info;
+ else
+ return RNR_ITER_CONTINUE;
+
+ link_id = le16_get_bits(mld_params->params,
+ IEEE80211_RNR_MLD_PARAMS_LINK_ID);
+
+ if (data->mld_id != mld_params->mld_id)
+ return RNR_ITER_CONTINUE;
+
+ if (data->link_id != link_id)
+ return RNR_ITER_CONTINUE;
+
+ data->ap_info = info;
+ data->param_ch_count =
+ le16_get_bits(mld_params->params,
+ IEEE80211_RNR_MLD_PARAMS_BSS_CHANGE_COUNT);
+
+ if (type == IEEE80211_TBTT_INFO_TYPE_TBTT)
+ data->use_for = NL80211_BSS_USE_FOR_ALL;
+ else
+ data->use_for = NL80211_BSS_USE_FOR_MLD_LINK;
+ return RNR_ITER_BREAK;
+}
+
static u8
-cfg80211_tbtt_info_for_mld_ap(const u8 *ie, size_t ielen, u8 mld_id, u8 link_id,
- const struct ieee80211_neighbor_ap_info **ap_info,
- const u8 **tbtt_info)
+cfg80211_rnr_info_for_mld_ap(const u8 *ie, size_t ielen, u8 mld_id, u8 link_id,
+ const struct ieee80211_neighbor_ap_info **ap_info,
+ u8 *param_ch_count)
{
- const struct ieee80211_neighbor_ap_info *info;
- const struct element *rnr;
- const u8 *pos, *end;
+ struct tbtt_info_iter_data data = {
+ .mld_id = mld_id,
+ .link_id = link_id,
+ };
- for_each_element_id(rnr, WLAN_EID_REDUCED_NEIGHBOR_REPORT, ie, ielen) {
- pos = rnr->data;
- end = rnr->data + rnr->datalen;
+ cfg80211_iter_rnr(ie, ielen, cfg80211_mld_ap_rnr_iter, &data);
- /* RNR IE may contain more than one NEIGHBOR_AP_INFO */
- while (sizeof(*info) <= end - pos) {
- const struct ieee80211_rnr_mld_params *mld_params;
- u16 params;
- u8 length, i, count, mld_params_offset;
- u8 type, lid;
- u32 use_for;
+ *ap_info = data.ap_info;
+ *param_ch_count = data.param_ch_count;
- info = (void *)pos;
- count = u8_get_bits(info->tbtt_info_hdr,
- IEEE80211_AP_INFO_TBTT_HDR_COUNT) + 1;
- length = info->tbtt_info_len;
+ return data.use_for;
+}
- pos += sizeof(*info);
+static struct element *
+cfg80211_gen_reporter_rnr(struct cfg80211_bss *source_bss, bool is_mbssid,
+ bool same_mld, u8 link_id, u8 bss_change_count,
+ gfp_t gfp)
+{
+ const struct cfg80211_bss_ies *ies;
+ struct ieee80211_neighbor_ap_info ap_info;
+ struct ieee80211_tbtt_info_ge_11 tbtt_info;
+ u32 short_ssid;
+ const struct element *elem;
+ struct element *res;
- if (count * length > end - pos)
- return 0;
+ /*
+ * We only generate the RNR to permit ML lookups. For that we do not
+	 * need an entry for the corresponding transmitting BSS, let's just skip
+ * it even though it would be easy to add.
+ */
+ if (!same_mld)
+ return NULL;
- type = u8_get_bits(info->tbtt_info_hdr,
- IEEE80211_AP_INFO_TBTT_HDR_TYPE);
+ /* We could use tx_data->ies if we change cfg80211_calc_short_ssid */
+ rcu_read_lock();
+ ies = rcu_dereference(source_bss->ies);
- if (type == IEEE80211_TBTT_INFO_TYPE_TBTT &&
- length >=
- offsetofend(struct ieee80211_tbtt_info_ge_11,
- mld_params)) {
- mld_params_offset =
- offsetof(struct ieee80211_tbtt_info_ge_11, mld_params);
- use_for = NL80211_BSS_USE_FOR_ALL;
- } else if (type == IEEE80211_TBTT_INFO_TYPE_MLD &&
- length >= sizeof(struct ieee80211_rnr_mld_params)) {
- mld_params_offset = 0;
- use_for = NL80211_BSS_USE_FOR_MLD_LINK;
- } else {
- pos += count * length;
- continue;
- }
+ ap_info.tbtt_info_len = offsetofend(typeof(tbtt_info), mld_params);
+ ap_info.tbtt_info_hdr =
+ u8_encode_bits(IEEE80211_TBTT_INFO_TYPE_TBTT,
+ IEEE80211_AP_INFO_TBTT_HDR_TYPE) |
+ u8_encode_bits(0, IEEE80211_AP_INFO_TBTT_HDR_COUNT);
- for (i = 0; i < count; i++) {
- mld_params = (void *)pos + mld_params_offset;
- params = le16_to_cpu(mld_params->params);
+ ap_info.channel = ieee80211_frequency_to_channel(source_bss->channel->center_freq);
- lid = u16_get_bits(params,
- IEEE80211_RNR_MLD_PARAMS_LINK_ID);
+ /* operating class */
+ elem = cfg80211_find_elem(WLAN_EID_SUPPORTED_REGULATORY_CLASSES,
+ ies->data, ies->len);
+ if (elem && elem->datalen >= 1) {
+ ap_info.op_class = elem->data[0];
+ } else {
+ struct cfg80211_chan_def chandef;
- if (mld_id == mld_params->mld_id &&
- link_id == lid) {
- *ap_info = info;
- *tbtt_info = pos;
+ /* The AP is not providing us with anything to work with. So
+ * make up a somewhat reasonable operating class, but don't
+ * bother with it too much as no one will ever use the
+ * information.
+ */
+ cfg80211_chandef_create(&chandef, source_bss->channel,
+ NL80211_CHAN_NO_HT);
- return use_for;
- }
+ if (!ieee80211_chandef_to_operating_class(&chandef,
+ &ap_info.op_class))
+ goto out_unlock;
+ }
- pos += length;
- }
- }
+ /* Just set TBTT offset and PSD 20 to invalid/unknown */
+ tbtt_info.tbtt_offset = 255;
+ tbtt_info.psd_20 = IEEE80211_RNR_TBTT_PARAMS_PSD_RESERVED;
+
+ memcpy(tbtt_info.bssid, source_bss->bssid, ETH_ALEN);
+ if (cfg80211_calc_short_ssid(ies, &elem, &short_ssid))
+ goto out_unlock;
+
+ rcu_read_unlock();
+
+ tbtt_info.short_ssid = cpu_to_le32(short_ssid);
+
+ tbtt_info.bss_params = IEEE80211_RNR_TBTT_PARAMS_SAME_SSID;
+
+ if (is_mbssid) {
+ tbtt_info.bss_params |= IEEE80211_RNR_TBTT_PARAMS_MULTI_BSSID;
+ tbtt_info.bss_params |= IEEE80211_RNR_TBTT_PARAMS_TRANSMITTED_BSSID;
}
- return 0;
+ tbtt_info.mld_params.mld_id = 0;
+ tbtt_info.mld_params.params =
+ le16_encode_bits(link_id, IEEE80211_RNR_MLD_PARAMS_LINK_ID) |
+ le16_encode_bits(bss_change_count,
+ IEEE80211_RNR_MLD_PARAMS_BSS_CHANGE_COUNT);
+
+ res = kzalloc(struct_size(res, data,
+ sizeof(ap_info) + ap_info.tbtt_info_len),
+ gfp);
+ if (!res)
+ return NULL;
+
+ /* Copy the data */
+ res->id = WLAN_EID_REDUCED_NEIGHBOR_REPORT;
+ res->datalen = sizeof(ap_info) + ap_info.tbtt_info_len;
+ memcpy(res->data, &ap_info, sizeof(ap_info));
+ memcpy(res->data + sizeof(ap_info), &tbtt_info, ap_info.tbtt_info_len);
+
+ return res;
+
+out_unlock:
+ rcu_read_unlock();
+ return NULL;
}
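
For orientation, the generated element has a fixed size; a worked size check, with the byte counts derived from the usual cfg80211 layouts rather than quoted from a header:

/* struct ieee80211_neighbor_ap_info:
 *	tbtt_info_hdr(1) + tbtt_info_len(1) + op_class(1) + channel(1) = 4
 * offsetofend(struct ieee80211_tbtt_info_ge_11, mld_params):
 *	tbtt_offset(1) + bssid(6) + short_ssid(4) + bss_params(1) +
 *	psd_20(1) + mld_params(3) = 16
 * Total: 2 (element id + datalen) + 4 + 16 = 22 bytes, the constant
 * the KUnit length expectations in tests/scan.c below account for.
 */
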
static void
@@ -2687,25 +2827,25 @@ cfg80211_parse_ml_elem_sta_data(struct wiphy *wiphy,
.source_bss = source_bss,
.bss_source = BSS_SOURCE_STA_PROFILE,
};
+ struct element *reporter_rnr = NULL;
struct ieee80211_multi_link_elem *ml_elem;
struct cfg80211_mle *mle;
u16 control;
u8 ml_common_len;
- u8 *new_ie;
+ u8 *new_ie = NULL;
struct cfg80211_bss *bss;
- int mld_id;
+ u8 mld_id, reporter_link_id, bss_change_count;
u16 seen_links = 0;
- const u8 *pos;
u8 i;
- if (!ieee80211_mle_size_ok(elem->data + 1, elem->datalen - 1))
+ if (!ieee80211_mle_type_ok(elem->data + 1,
+ IEEE80211_ML_CONTROL_TYPE_BASIC,
+ elem->datalen - 1))
return;
- ml_elem = (void *)elem->data + 1;
+ ml_elem = (void *)(elem->data + 1);
control = le16_to_cpu(ml_elem->control);
- if (u16_get_bits(control, IEEE80211_ML_CONTROL_TYPE) !=
- IEEE80211_ML_CONTROL_TYPE_BASIC)
- return;
+ ml_common_len = ml_elem->variable[0];
/* Must be present when transmitted by an AP (in a probe response) */
if (!(control & IEEE80211_MLC_BASIC_PRES_BSS_PARAM_CH_CNT) ||
@@ -2713,18 +2853,8 @@ cfg80211_parse_ml_elem_sta_data(struct wiphy *wiphy,
!(control & IEEE80211_MLC_BASIC_PRES_MLD_CAPA_OP))
return;
- ml_common_len = ml_elem->variable[0];
-
- /* length + MLD MAC address + link ID info + BSS Params Change Count */
- pos = ml_elem->variable + 1 + 6 + 1 + 1;
-
- if (u16_get_bits(control, IEEE80211_MLC_BASIC_PRES_MED_SYNC_DELAY))
- pos += 2;
- if (u16_get_bits(control, IEEE80211_MLC_BASIC_PRES_EML_CAPA))
- pos += 2;
-
- /* MLD capabilities and operations */
- pos += 2;
+ reporter_link_id = ieee80211_mle_get_link_id(elem->data + 1);
+ bss_change_count = ieee80211_mle_get_bss_param_ch_cnt(elem->data + 1);
/*
* The MLD ID of the reporting AP is always zero. It is set if the AP
@@ -2732,32 +2862,35 @@ cfg80211_parse_ml_elem_sta_data(struct wiphy *wiphy,
* relating to a nontransmitted BSS (matching the Multi-BSSID Index,
* Draft P802.11be_D3.2, 35.3.4.2)
*/
- if (u16_get_bits(control, IEEE80211_MLC_BASIC_PRES_MLD_ID)) {
- mld_id = *pos;
- pos += 1;
- } else {
- mld_id = 0;
- }
-
- /* Extended MLD capabilities and operations */
- pos += 2;
+ mld_id = ieee80211_mle_get_mld_id(elem->data + 1);
/* Fully defrag the ML element for sta information/profile iteration */
mle = cfg80211_defrag_mle(elem, tx_data->ie, tx_data->ielen, gfp);
if (!mle)
return;
+ /* No point in doing anything if there is no per-STA profile */
+ if (!mle->sta_prof[0])
+ goto out;
+
new_ie = kmalloc(IEEE80211_MAX_DATA_LEN, gfp);
if (!new_ie)
goto out;
+ reporter_rnr = cfg80211_gen_reporter_rnr(source_bss,
+ u16_get_bits(control,
+ IEEE80211_MLC_BASIC_PRES_MLD_ID),
+ mld_id == 0, reporter_link_id,
+ bss_change_count,
+ gfp);
+
for (i = 0; i < ARRAY_SIZE(mle->sta_prof) && mle->sta_prof[i]; i++) {
const struct ieee80211_neighbor_ap_info *ap_info;
enum nl80211_band band;
u32 freq;
const u8 *profile;
- const u8 *tbtt_info;
ssize_t profile_len;
+ u8 param_ch_count;
u8 link_id, use_for;
if (!ieee80211_mle_basic_sta_prof_size_ok((u8 *)mle->sta_prof[i],
@@ -2800,10 +2933,11 @@ cfg80211_parse_ml_elem_sta_data(struct wiphy *wiphy,
profile_len -= 2;
/* Find in RNR to look up channel information */
- use_for = cfg80211_tbtt_info_for_mld_ap(tx_data->ie,
- tx_data->ielen,
- mld_id, link_id,
- &ap_info, &tbtt_info);
+ use_for = cfg80211_rnr_info_for_mld_ap(tx_data->ie,
+ tx_data->ielen,
+ mld_id, link_id,
+ &ap_info,
+ &param_ch_count);
if (!use_for)
continue;
@@ -2846,7 +2980,8 @@ cfg80211_parse_ml_elem_sta_data(struct wiphy *wiphy,
continue;
/* Copy the Basic Multi-Link element including the common
- * information, and then fix up the link ID.
+ * information, and then fix up the link ID and BSS param
+ * change count.
* Note that the ML element length has been verified and we
* also checked that it contains the link ID.
*/
@@ -2857,10 +2992,21 @@ cfg80211_parse_ml_elem_sta_data(struct wiphy *wiphy,
sizeof(*ml_elem) + ml_common_len);
new_ie[data.ielen + sizeof(*ml_elem) + 1 + ETH_ALEN] = link_id;
+ new_ie[data.ielen + sizeof(*ml_elem) + 1 + ETH_ALEN + 1] =
+ param_ch_count;
data.ielen += sizeof(*ml_elem) + ml_common_len;
- /* TODO: Add an RNR containing only the reporting AP */
+ if (reporter_rnr && (use_for & NL80211_BSS_USE_FOR_NORMAL)) {
+ if (data.ielen + sizeof(struct element) +
+ reporter_rnr->datalen > IEEE80211_MAX_DATA_LEN)
+ continue;
+
+ memcpy(new_ie + data.ielen, reporter_rnr,
+ sizeof(struct element) + reporter_rnr->datalen);
+ data.ielen += sizeof(struct element) +
+ reporter_rnr->datalen;
+ }
bss = cfg80211_inform_single_bss_data(wiphy, &data, gfp);
if (!bss)
@@ -2869,6 +3015,7 @@ cfg80211_parse_ml_elem_sta_data(struct wiphy *wiphy,
}
out:
+ kfree(reporter_rnr);
kfree(new_ie);
kfree(mle);
}
@@ -2921,6 +3068,10 @@ cfg80211_inform_bss_data(struct wiphy *wiphy,
if (!res)
return NULL;
+ /* don't do any further MBSSID/ML handling for S1G */
+ if (ftype == CFG80211_BSS_FTYPE_S1G_BEACON)
+ return res;
+
cfg80211_parse_mbssid_data(wiphy, &inform_data, res, gfp);
cfg80211_parse_ml_sta_data(wiphy, &inform_data, res, gfp);
@@ -2929,59 +3080,22 @@ cfg80211_inform_bss_data(struct wiphy *wiphy,
}
EXPORT_SYMBOL(cfg80211_inform_bss_data);
-static bool cfg80211_uhb_power_type_valid(const u8 *ie,
- size_t ielen,
- const u32 flags)
-{
- const struct element *tmp;
- struct ieee80211_he_operation *he_oper;
-
- tmp = cfg80211_find_ext_elem(WLAN_EID_EXT_HE_OPERATION, ie, ielen);
- if (tmp && tmp->datalen >= sizeof(*he_oper) + 1) {
- const struct ieee80211_he_6ghz_oper *he_6ghz_oper;
-
- he_oper = (void *)&tmp->data[1];
- he_6ghz_oper = ieee80211_he_6ghz_oper(he_oper);
-
- if (!he_6ghz_oper)
- return false;
-
- switch (u8_get_bits(he_6ghz_oper->control,
- IEEE80211_HE_6GHZ_OPER_CTRL_REG_INFO)) {
- case IEEE80211_6GHZ_CTRL_REG_LPI_AP:
- return true;
- case IEEE80211_6GHZ_CTRL_REG_SP_AP:
- return !(flags & IEEE80211_CHAN_NO_UHB_AFC_CLIENT);
- case IEEE80211_6GHZ_CTRL_REG_VLP_AP:
- return !(flags & IEEE80211_CHAN_NO_UHB_VLP_CLIENT);
- }
- }
- return false;
-}
-
-/* cfg80211_inform_bss_width_frame helper */
-static struct cfg80211_bss *
-cfg80211_inform_single_bss_frame_data(struct wiphy *wiphy,
- struct cfg80211_inform_bss *data,
- struct ieee80211_mgmt *mgmt, size_t len,
- gfp_t gfp)
+struct cfg80211_bss *
+cfg80211_inform_bss_frame_data(struct wiphy *wiphy,
+ struct cfg80211_inform_bss *data,
+ struct ieee80211_mgmt *mgmt, size_t len,
+ gfp_t gfp)
{
- struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
- struct cfg80211_internal_bss tmp = {}, *res;
- struct cfg80211_bss_ies *ies;
- struct ieee80211_channel *channel;
- bool signal_valid;
+ size_t min_hdr_len = offsetof(struct ieee80211_mgmt,
+ u.probe_resp.variable);
struct ieee80211_ext *ext = NULL;
- u8 *bssid, *variable;
- u16 capability, beacon_int;
- size_t ielen, min_hdr_len = offsetof(struct ieee80211_mgmt,
- u.probe_resp.variable);
- int bss_type;
-
- BUILD_BUG_ON(offsetof(struct ieee80211_mgmt, u.probe_resp.variable) !=
- offsetof(struct ieee80211_mgmt, u.beacon.variable));
-
- trace_cfg80211_inform_bss_frame(wiphy, data, mgmt, len);
+ enum cfg80211_bss_frame_type ftype;
+ u16 beacon_interval;
+ const u8 *bssid;
+ u16 capability;
+ const u8 *ie;
+ size_t ielen;
+ u64 tsf;
if (WARN_ON(!mgmt))
return NULL;
@@ -2989,9 +3103,10 @@ cfg80211_inform_single_bss_frame_data(struct wiphy *wiphy,
if (WARN_ON(!wiphy))
return NULL;
- if (WARN_ON(wiphy->signal_type == CFG80211_SIGNAL_TYPE_UNSPEC &&
- (data->signal < 0 || data->signal > 100)))
- return NULL;
+ BUILD_BUG_ON(offsetof(struct ieee80211_mgmt, u.probe_resp.variable) !=
+ offsetof(struct ieee80211_mgmt, u.beacon.variable));
+
+ trace_cfg80211_inform_bss_frame(wiphy, data, mgmt, len);
if (ieee80211_is_s1g_beacon(mgmt->frame_control)) {
ext = (void *) mgmt;
@@ -3005,32 +3120,17 @@ cfg80211_inform_single_bss_frame_data(struct wiphy *wiphy,
return NULL;
ielen = len - min_hdr_len;
- variable = mgmt->u.probe_resp.variable;
- if (ext) {
- if (ieee80211_is_s1g_short_beacon(mgmt->frame_control))
- variable = ext->u.s1g_short_beacon.variable;
- else
- variable = ext->u.s1g_beacon.variable;
- }
-
- channel = cfg80211_get_bss_channel(wiphy, variable, ielen, data->chan);
- if (!channel)
- return NULL;
-
- if (channel->band == NL80211_BAND_6GHZ &&
- !cfg80211_uhb_power_type_valid(variable, ielen, channel->flags)) {
- data->restrict_use = 1;
- data->use_for = 0;
- data->cannot_use_reasons =
- NL80211_BSS_CANNOT_USE_UHB_PWR_MISMATCH;
- }
-
+ ie = mgmt->u.probe_resp.variable;
if (ext) {
const struct ieee80211_s1g_bcn_compat_ie *compat;
const struct element *elem;
- elem = cfg80211_find_elem(WLAN_EID_S1G_BCN_COMPAT,
- variable, ielen);
+ if (ieee80211_is_s1g_short_beacon(mgmt->frame_control))
+ ie = ext->u.s1g_short_beacon.variable;
+ else
+ ie = ext->u.s1g_beacon.variable;
+
+ elem = cfg80211_find_elem(WLAN_EID_S1G_BCN_COMPAT, ie, ielen);
if (!elem)
return NULL;
if (elem->datalen < sizeof(*compat))
@@ -3038,112 +3138,26 @@ cfg80211_inform_single_bss_frame_data(struct wiphy *wiphy,
compat = (void *)elem->data;
bssid = ext->u.s1g_beacon.sa;
capability = le16_to_cpu(compat->compat_info);
- beacon_int = le16_to_cpu(compat->beacon_int);
+ beacon_interval = le16_to_cpu(compat->beacon_int);
} else {
bssid = mgmt->bssid;
- beacon_int = le16_to_cpu(mgmt->u.probe_resp.beacon_int);
+ beacon_interval = le16_to_cpu(mgmt->u.probe_resp.beacon_int);
capability = le16_to_cpu(mgmt->u.probe_resp.capab_info);
}
- if (channel->band == NL80211_BAND_60GHZ) {
- bss_type = capability & WLAN_CAPABILITY_DMG_TYPE_MASK;
- if (bss_type == WLAN_CAPABILITY_DMG_TYPE_AP ||
- bss_type == WLAN_CAPABILITY_DMG_TYPE_PBSS)
- regulatory_hint_found_beacon(wiphy, channel, gfp);
- } else {
- if (capability & WLAN_CAPABILITY_ESS)
- regulatory_hint_found_beacon(wiphy, channel, gfp);
- }
-
- ies = kzalloc(sizeof(*ies) + ielen, gfp);
- if (!ies)
- return NULL;
- ies->len = ielen;
- ies->tsf = le64_to_cpu(mgmt->u.probe_resp.timestamp);
- ies->from_beacon = ieee80211_is_beacon(mgmt->frame_control) ||
- ieee80211_is_s1g_beacon(mgmt->frame_control);
- memcpy(ies->data, variable, ielen);
+ tsf = le64_to_cpu(mgmt->u.probe_resp.timestamp);
if (ieee80211_is_probe_resp(mgmt->frame_control))
- rcu_assign_pointer(tmp.pub.proberesp_ies, ies);
+ ftype = CFG80211_BSS_FTYPE_PRESP;
+ else if (ext)
+ ftype = CFG80211_BSS_FTYPE_S1G_BEACON;
else
- rcu_assign_pointer(tmp.pub.beacon_ies, ies);
- rcu_assign_pointer(tmp.pub.ies, ies);
+ ftype = CFG80211_BSS_FTYPE_BEACON;
- memcpy(tmp.pub.bssid, bssid, ETH_ALEN);
- tmp.pub.beacon_interval = beacon_int;
- tmp.pub.capability = capability;
- tmp.pub.channel = channel;
- tmp.pub.signal = data->signal;
- tmp.ts_boottime = data->boottime_ns;
- tmp.parent_tsf = data->parent_tsf;
- tmp.pub.chains = data->chains;
- memcpy(tmp.pub.chain_signal, data->chain_signal, IEEE80211_MAX_CHAINS);
- ether_addr_copy(tmp.parent_bssid, data->parent_bssid);
- tmp.pub.use_for = data->restrict_use ?
- data->use_for :
- NL80211_BSS_USE_FOR_ALL;
- tmp.pub.cannot_use_reasons = data->cannot_use_reasons;
-
- signal_valid = data->chan == channel;
- spin_lock_bh(&rdev->bss_lock);
- res = __cfg80211_bss_update(rdev, &tmp, signal_valid, jiffies);
- if (!res)
- goto drop;
-
- rdev_inform_bss(rdev, &res->pub, ies, data->drv_data);
-
- spin_unlock_bh(&rdev->bss_lock);
-
- trace_cfg80211_return_bss(&res->pub);
- /* __cfg80211_bss_update gives us a referenced result */
- return &res->pub;
-
-drop:
- spin_unlock_bh(&rdev->bss_lock);
- return NULL;
-}
-
-struct cfg80211_bss *
-cfg80211_inform_bss_frame_data(struct wiphy *wiphy,
- struct cfg80211_inform_bss *data,
- struct ieee80211_mgmt *mgmt, size_t len,
- gfp_t gfp)
-{
- struct cfg80211_inform_single_bss_data inform_data = {
- .drv_data = data,
- .ie = mgmt->u.probe_resp.variable,
- .ielen = len - offsetof(struct ieee80211_mgmt,
- u.probe_resp.variable),
- .use_for = data->restrict_use ?
- data->use_for :
- NL80211_BSS_USE_FOR_ALL,
- .cannot_use_reasons = data->cannot_use_reasons,
- };
- struct cfg80211_bss *res;
-
- res = cfg80211_inform_single_bss_frame_data(wiphy, data, mgmt,
- len, gfp);
- if (!res)
- return NULL;
-
- /* don't do any further MBSSID/ML handling for S1G */
- if (ieee80211_is_s1g_beacon(mgmt->frame_control))
- return res;
-
- inform_data.ftype = ieee80211_is_beacon(mgmt->frame_control) ?
- CFG80211_BSS_FTYPE_BEACON : CFG80211_BSS_FTYPE_PRESP;
- memcpy(inform_data.bssid, mgmt->bssid, ETH_ALEN);
- inform_data.tsf = le64_to_cpu(mgmt->u.probe_resp.timestamp);
- inform_data.beacon_interval =
- le16_to_cpu(mgmt->u.probe_resp.beacon_int);
-
- /* process each non-transmitting bss */
- cfg80211_parse_mbssid_data(wiphy, &inform_data, res, gfp);
-
- cfg80211_parse_ml_sta_data(wiphy, &inform_data, res, gfp);
-
- return res;
+ return cfg80211_inform_bss_data(wiphy, data, ftype,
+ bssid, tsf, capability,
+ beacon_interval, ie, ielen,
+ gfp);
}
EXPORT_SYMBOL(cfg80211_inform_bss_frame_data);
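
The RNR/MLD parsing above leans entirely on the FIELD-style helpers le16_get_bits()/le16_encode_bits(). A minimal standalone sketch of their semantics; the mask is illustrative, not the kernel's definition of IEEE80211_RNR_MLD_PARAMS_LINK_ID:

#include <stdint.h>
#include <stdio.h>

#define PARAMS_LINK_ID	0x000f

/* mask & -mask isolates the lowest set bit of the mask, which doubles
 * as the multiplier/divisor that shifts a value into/out of the field;
 * the kernel derives the same shift from the mask at compile time. */
static uint16_t encode_bits(uint16_t v, uint16_t mask)
{
	return (uint16_t)((v * (mask & -mask)) & mask);
}

static uint16_t get_bits(uint16_t w, uint16_t mask)
{
	return (uint16_t)((w & mask) / (mask & -mask));
}

int main(void)
{
	uint16_t params = encode_bits(5, PARAMS_LINK_ID);

	printf("link_id=%u\n", (unsigned)get_bits(params, PARAMS_LINK_ID)); /* 5 */
	return 0;
}
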
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index 195c8532734b..82e3ce42206c 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -209,7 +209,8 @@ static int cfg80211_conn_do_work(struct wireless_dev *wdev,
if (!req.bss) {
err = -ENOENT;
} else {
- err = cfg80211_mlme_assoc(rdev, wdev->netdev, &req);
+ err = cfg80211_mlme_assoc(rdev, wdev->netdev,
+ &req, NULL);
cfg80211_put_bss(&rdev->wiphy, req.bss);
}
diff --git a/net/wireless/tests/Makefile b/net/wireless/tests/Makefile
index 1f6622fcb758..c364e63b508e 100644
--- a/net/wireless/tests/Makefile
+++ b/net/wireless/tests/Makefile
@@ -1,3 +1,3 @@
-cfg80211-tests-y += module.o fragmentation.o scan.o util.o
+cfg80211-tests-y += module.o fragmentation.o scan.o util.o chan.o
obj-$(CONFIG_CFG80211_KUNIT_TEST) += cfg80211-tests.o
diff --git a/net/wireless/tests/chan.c b/net/wireless/tests/chan.c
new file mode 100644
index 000000000000..d02258ac2dab
--- /dev/null
+++ b/net/wireless/tests/chan.c
@@ -0,0 +1,228 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * KUnit tests for channel helper functions
+ *
+ * Copyright (C) 2023-2024 Intel Corporation
+ */
+#include <net/cfg80211.h>
+#include <kunit/test.h>
+
+MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
+
+static struct ieee80211_channel chan_6ghz_1 = {
+ .band = NL80211_BAND_6GHZ,
+ .center_freq = 5955,
+};
+
+static struct ieee80211_channel chan_6ghz_5 = {
+ .band = NL80211_BAND_6GHZ,
+ .center_freq = 5975,
+};
+
+static struct ieee80211_channel chan_6ghz_105 = {
+ .band = NL80211_BAND_6GHZ,
+ .center_freq = 6475,
+};
+
+static const struct chandef_compat_case {
+ const char *desc;
+ /* leave c1 empty for tests for identical */
+ struct cfg80211_chan_def c1, c2;
+ /* we test both ways around, so c2 should always be the compat one */
+ bool compat;
+} chandef_compat_cases[] = {
+ {
+ .desc = "identical non-HT",
+ .c2 = {
+ .width = NL80211_CHAN_WIDTH_20_NOHT,
+ .chan = &chan_6ghz_1,
+ .center_freq1 = 5955,
+ },
+ .compat = true,
+ },
+ {
+ .desc = "identical 20 MHz",
+ .c2 = {
+ .width = NL80211_CHAN_WIDTH_20,
+ .chan = &chan_6ghz_1,
+ .center_freq1 = 5955,
+ },
+ .compat = true,
+ },
+ {
+ .desc = "identical 40 MHz",
+ .c2 = {
+ .width = NL80211_CHAN_WIDTH_40,
+ .chan = &chan_6ghz_1,
+ .center_freq1 = 5955 + 10,
+ },
+ .compat = true,
+ },
+ {
+ .desc = "identical 80 MHz",
+ .c2 = {
+ .width = NL80211_CHAN_WIDTH_80,
+ .chan = &chan_6ghz_1,
+ .center_freq1 = 5955 + 10 + 20,
+ },
+ .compat = true,
+ },
+ {
+ .desc = "identical 160 MHz",
+ .c2 = {
+ .width = NL80211_CHAN_WIDTH_160,
+ .chan = &chan_6ghz_1,
+ .center_freq1 = 5955 + 10 + 20 + 40,
+ },
+ .compat = true,
+ },
+ {
+ .desc = "identical 320 MHz",
+ .c2 = {
+ .width = NL80211_CHAN_WIDTH_320,
+ .chan = &chan_6ghz_1,
+ .center_freq1 = 5955 + 10 + 20 + 40 + 80,
+ },
+ .compat = true,
+ },
+ {
+		.desc = "20 MHz in 320 MHz",
+ .c1 = {
+ .width = NL80211_CHAN_WIDTH_20,
+ .chan = &chan_6ghz_1,
+ .center_freq1 = 5955,
+ },
+ .c2 = {
+ .width = NL80211_CHAN_WIDTH_320,
+ .chan = &chan_6ghz_1,
+ .center_freq1 = 5955 + 10 + 20 + 40 + 80,
+ },
+ .compat = true,
+ },
+ {
+ .desc = "different 20 MHz",
+ .c1 = {
+ .width = NL80211_CHAN_WIDTH_20,
+ .chan = &chan_6ghz_1,
+ .center_freq1 = 5955,
+ },
+ .c2 = {
+ .width = NL80211_CHAN_WIDTH_20,
+ .chan = &chan_6ghz_5,
+ .center_freq1 = 5975,
+ },
+ },
+ {
+ .desc = "different primary 160 MHz",
+ .c1 = {
+ .width = NL80211_CHAN_WIDTH_320,
+ .chan = &chan_6ghz_105,
+ .center_freq1 = 6475 + 150,
+ },
+ .c2 = {
+ .width = NL80211_CHAN_WIDTH_320,
+ .chan = &chan_6ghz_105,
+ .center_freq1 = 6475 - 10,
+ },
+ },
+ {
+ /* similar to previous test but one has lower BW */
+ .desc = "matching primary 160 MHz",
+ .c1 = {
+ .width = NL80211_CHAN_WIDTH_160,
+ .chan = &chan_6ghz_105,
+ .center_freq1 = 6475 + 70,
+ },
+ .c2 = {
+ .width = NL80211_CHAN_WIDTH_320,
+ .chan = &chan_6ghz_105,
+ .center_freq1 = 6475 - 10,
+ },
+ .compat = true,
+ },
+ {
+		.desc = "matching primary 160 MHz & punctured secondary 160 MHz",
+ .c1 = {
+ .width = NL80211_CHAN_WIDTH_160,
+ .chan = &chan_6ghz_105,
+ .center_freq1 = 6475 + 70,
+ },
+ .c2 = {
+ .width = NL80211_CHAN_WIDTH_320,
+ .chan = &chan_6ghz_105,
+ .center_freq1 = 6475 - 10,
+ .punctured = 0xf,
+ },
+ .compat = true,
+ },
+ {
+ .desc = "matching primary 160 MHz & punctured matching",
+ .c1 = {
+ .width = NL80211_CHAN_WIDTH_160,
+ .chan = &chan_6ghz_105,
+ .center_freq1 = 6475 + 70,
+ .punctured = 0xc0,
+ },
+ .c2 = {
+ .width = NL80211_CHAN_WIDTH_320,
+ .chan = &chan_6ghz_105,
+ .center_freq1 = 6475 - 10,
+ .punctured = 0xc000,
+ },
+ .compat = true,
+ },
+ {
+ .desc = "matching primary 160 MHz & punctured not matching",
+ .c1 = {
+ .width = NL80211_CHAN_WIDTH_160,
+ .chan = &chan_6ghz_105,
+ .center_freq1 = 6475 + 70,
+ .punctured = 0x80,
+ },
+ .c2 = {
+ .width = NL80211_CHAN_WIDTH_320,
+ .chan = &chan_6ghz_105,
+ .center_freq1 = 6475 - 10,
+ .punctured = 0xc000,
+ },
+ },
+};
+
+KUNIT_ARRAY_PARAM_DESC(chandef_compat, chandef_compat_cases, desc)
+
+static void test_chandef_compat(struct kunit *test)
+{
+ const struct chandef_compat_case *params = test->param_value;
+ const struct cfg80211_chan_def *ret, *expect;
+ struct cfg80211_chan_def c1 = params->c1;
+
+ /* tests with identical ones */
+ if (!params->c1.chan)
+ c1 = params->c2;
+
+ KUNIT_EXPECT_EQ(test, cfg80211_chandef_valid(&c1), true);
+ KUNIT_EXPECT_EQ(test, cfg80211_chandef_valid(&params->c2), true);
+
+ expect = params->compat ? &params->c2 : NULL;
+
+ ret = cfg80211_chandef_compatible(&c1, &params->c2);
+ KUNIT_EXPECT_PTR_EQ(test, ret, expect);
+
+ if (!params->c1.chan)
+ expect = &c1;
+
+ ret = cfg80211_chandef_compatible(&params->c2, &c1);
+ KUNIT_EXPECT_PTR_EQ(test, ret, expect);
+}
+
+static struct kunit_case chandef_compat_test_cases[] = {
+ KUNIT_CASE_PARAM(test_chandef_compat, chandef_compat_gen_params),
+ {}
+};
+
+static struct kunit_suite chandef_compat = {
+ .name = "cfg80211-chandef-compat",
+ .test_cases = chandef_compat_test_cases,
+};
+
+kunit_test_suite(chandef_compat);
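
Read together, the expectations encode the contract this suite assumes for cfg80211_chandef_compatible(): when two definitions are compatible it returns the wider one, otherwise NULL, independent of argument order. A usage sketch under that reading; use_chandef() and reject() are hypothetical:

const struct cfg80211_chan_def *res;

res = cfg80211_chandef_compatible(&c_20mhz, &c_320mhz);
if (res)		/* res == &c_320mhz: continue on the wider channel */
	use_chandef(res);
else			/* incompatible, e.g. different primaries */
	reject();
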
diff --git a/net/wireless/tests/fragmentation.c b/net/wireless/tests/fragmentation.c
index 49a339ca8880..411fae18cd88 100644
--- a/net/wireless/tests/fragmentation.c
+++ b/net/wireless/tests/fragmentation.c
@@ -2,7 +2,7 @@
/*
* KUnit tests for element fragmentation
*
- * Copyright (C) 2023 Intel Corporation
+ * Copyright (C) 2023-2024 Intel Corporation
*/
#include <linux/ieee80211.h>
#include <net/cfg80211.h>
@@ -27,7 +27,12 @@ static void defragment_0(struct kunit *test)
ret = cfg80211_defragment_element((void *)input,
input, sizeof(input),
- data, sizeof(input),
+ NULL, 0,
+ WLAN_EID_FRAGMENT);
+ KUNIT_EXPECT_EQ(test, ret, 253);
+ ret = cfg80211_defragment_element((void *)input,
+ input, sizeof(input),
+ data, ret,
WLAN_EID_FRAGMENT);
KUNIT_EXPECT_EQ(test, ret, 253);
KUNIT_EXPECT_MEMEQ(test, data, input + 3, 253);
@@ -63,7 +68,12 @@ static void defragment_1(struct kunit *test)
ret = cfg80211_defragment_element((void *)input,
input, sizeof(input),
- data, sizeof(input),
+ NULL, 0,
+ WLAN_EID_FRAGMENT);
+ KUNIT_EXPECT_EQ(test, ret, 254 + 7);
+ ret = cfg80211_defragment_element((void *)input,
+ input, sizeof(input),
+ data, ret,
WLAN_EID_FRAGMENT);
/* this means the last fragment was not used */
KUNIT_EXPECT_EQ(test, ret, 254 + 7);
@@ -106,10 +116,15 @@ static void defragment_2(struct kunit *test)
ret = cfg80211_defragment_element((void *)input,
input, sizeof(input),
- data, sizeof(input),
+ NULL, 0,
WLAN_EID_FRAGMENT);
/* this means the last fragment was not used */
KUNIT_EXPECT_EQ(test, ret, 254 + 255 + 1);
+ ret = cfg80211_defragment_element((void *)input,
+ input, sizeof(input),
+ data, ret,
+ WLAN_EID_FRAGMENT);
+ KUNIT_EXPECT_EQ(test, ret, 254 + 255 + 1);
KUNIT_EXPECT_MEMEQ(test, data, input + 3, 254);
KUNIT_EXPECT_MEMEQ(test, data + 254, input + 257 + 2, 255);
KUNIT_EXPECT_MEMEQ(test, data + 254 + 255, input + 2 * 257 + 2, 1);
@@ -134,7 +149,12 @@ static void defragment_at_end(struct kunit *test)
ret = cfg80211_defragment_element((void *)input,
input, sizeof(input),
- data, sizeof(input),
+ NULL, 0,
+ WLAN_EID_FRAGMENT);
+ KUNIT_EXPECT_EQ(test, ret, 254 + 7);
+ ret = cfg80211_defragment_element((void *)input,
+ input, sizeof(input),
+ data, ret,
WLAN_EID_FRAGMENT);
KUNIT_EXPECT_EQ(test, ret, 254 + 7);
KUNIT_EXPECT_MEMEQ(test, data, input + 3, 254);
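
The reworked tests exercise the new query mode of cfg80211_defragment_element(): passing data == NULL returns the defragmented length without copying, so callers can size the destination exactly. A sketch of the intended two-call pattern; the allocation and error handling are illustrative:

/* Sketch of the two-call pattern exercised above. */
ssize_t len;
u8 *buf;

len = cfg80211_defragment_element(elem, ies, ieslen, NULL, 0,
				  WLAN_EID_FRAGMENT);
if (len < 0)
	return len;

buf = kmalloc(len, GFP_KERNEL);		/* assumes process context */
if (!buf)
	return -ENOMEM;

len = cfg80211_defragment_element(elem, ies, ieslen, buf, len,
				  WLAN_EID_FRAGMENT);
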
diff --git a/net/wireless/tests/scan.c b/net/wireless/tests/scan.c
index 77854161cd22..9f458be71659 100644
--- a/net/wireless/tests/scan.c
+++ b/net/wireless/tests/scan.c
@@ -2,7 +2,7 @@
/*
* KUnit tests for inform_bss functions
*
- * Copyright (C) 2023 Intel Corporation
+ * Copyright (C) 2023-2024 Intel Corporation
*/
#include <linux/ieee80211.h>
#include <net/cfg80211.h>
@@ -406,9 +406,32 @@ static struct inform_bss_ml_sta_case {
const char *desc;
int mld_id;
bool sta_prof_vendor_elems;
+ bool include_oper_class;
+ bool nstr;
} inform_bss_ml_sta_cases[] = {
- { .desc = "no_mld_id", .mld_id = 0, .sta_prof_vendor_elems = false },
- { .desc = "mld_id_eq_1", .mld_id = 1, .sta_prof_vendor_elems = true },
+ {
+ .desc = "zero_mld_id",
+ .mld_id = 0,
+ .sta_prof_vendor_elems = false,
+ }, {
+ .desc = "zero_mld_id_with_oper_class",
+ .mld_id = 0,
+ .sta_prof_vendor_elems = false,
+ .include_oper_class = true,
+ }, {
+ .desc = "mld_id_eq_1",
+ .mld_id = 1,
+ .sta_prof_vendor_elems = true,
+ }, {
+ .desc = "mld_id_eq_1_with_oper_class",
+ .mld_id = 1,
+ .sta_prof_vendor_elems = true,
+ .include_oper_class = true,
+ }, {
+ .desc = "nstr",
+ .mld_id = 0,
+ .nstr = true,
+ },
};
KUNIT_ARRAY_PARAM_DESC(inform_bss_ml_sta, inform_bss_ml_sta_cases, desc)
@@ -440,7 +463,7 @@ static void test_inform_bss_ml_sta(struct kunit *test)
struct {
struct ieee80211_neighbor_ap_info info;
struct ieee80211_tbtt_info_ge_11 ap;
- } __packed rnr = {
+ } __packed rnr_normal = {
.info = {
.tbtt_info_hdr = u8_encode_bits(0, IEEE80211_AP_INFO_TBTT_HDR_COUNT),
.tbtt_info_len = sizeof(struct ieee80211_tbtt_info_ge_11),
@@ -460,6 +483,28 @@ static void test_inform_bss_ml_sta(struct kunit *test)
}
};
struct {
+ struct ieee80211_neighbor_ap_info info;
+ struct ieee80211_rnr_mld_params mld_params;
+ } __packed rnr_nstr = {
+ .info = {
+ .tbtt_info_hdr =
+ u8_encode_bits(0, IEEE80211_AP_INFO_TBTT_HDR_COUNT) |
+ u8_encode_bits(IEEE80211_TBTT_INFO_TYPE_MLD,
+ IEEE80211_AP_INFO_TBTT_HDR_TYPE),
+ .tbtt_info_len = sizeof(struct ieee80211_rnr_mld_params),
+ .op_class = 81,
+ .channel = 11,
+ },
+ .mld_params = {
+ .mld_id = params->mld_id,
+ .params =
+ le16_encode_bits(link_id,
+ IEEE80211_RNR_MLD_PARAMS_LINK_ID),
+ }
+ };
+ size_t rnr_len = params->nstr ? sizeof(rnr_nstr) : sizeof(rnr_normal);
+ void *rnr = params->nstr ? (void *)&rnr_nstr : (void *)&rnr_normal;
+ struct {
__le16 control;
u8 var_len;
u8 mld_mac_addr[ETH_ALEN];
@@ -498,7 +543,7 @@ static void test_inform_bss_ml_sta(struct kunit *test)
u16_encode_bits(link_id,
IEEE80211_MLE_STA_CONTROL_LINK_ID)),
.var_len = sizeof(sta_prof) - 2 - 2,
- .bssid = { *rnr.ap.bssid },
+ .bssid = { *rnr_normal.ap.bssid },
.beacon_int = cpu_to_le16(101),
.tsf_offset = cpu_to_le64(-123ll),
.capabilities = cpu_to_le16(0xdead),
@@ -515,9 +560,15 @@ static void test_inform_bss_ml_sta(struct kunit *test)
skb_put_u8(input, 4);
skb_put_data(input, "TEST", 4);
+ if (params->include_oper_class) {
+ skb_put_u8(input, WLAN_EID_SUPPORTED_REGULATORY_CLASSES);
+ skb_put_u8(input, 1);
+ skb_put_u8(input, 81);
+ }
+
skb_put_u8(input, WLAN_EID_REDUCED_NEIGHBOR_REPORT);
- skb_put_u8(input, sizeof(rnr));
- skb_put_data(input, &rnr, sizeof(rnr));
+ skb_put_u8(input, rnr_len);
+ skb_put_data(input, rnr, rnr_len);
/* build a multi-link element */
skb_put_u8(input, WLAN_EID_EXTENSION);
@@ -563,9 +614,10 @@ static void test_inform_bss_ml_sta(struct kunit *test)
KUNIT_EXPECT_EQ(test, ctx.inform_bss_count, 2);
/* Check link_bss *****************************************************/
- link_bss = cfg80211_get_bss(wiphy, NULL, sta_prof.bssid, NULL, 0,
- IEEE80211_BSS_TYPE_ANY,
- IEEE80211_PRIVACY_ANY);
+ link_bss = __cfg80211_get_bss(wiphy, NULL, sta_prof.bssid, NULL, 0,
+ IEEE80211_BSS_TYPE_ANY,
+ IEEE80211_PRIVACY_ANY,
+ 0);
KUNIT_ASSERT_NOT_NULL(test, link_bss);
KUNIT_EXPECT_EQ(test, link_bss->signal, 0);
KUNIT_EXPECT_EQ(test, link_bss->beacon_interval,
@@ -576,21 +628,43 @@ static void test_inform_bss_ml_sta(struct kunit *test)
KUNIT_EXPECT_PTR_EQ(test, link_bss->channel,
ieee80211_get_channel_khz(wiphy, MHZ_TO_KHZ(2462)));
+ /* Test wiphy does not set WIPHY_FLAG_SUPPORTS_NSTR_NONPRIMARY */
+ if (params->nstr) {
+ KUNIT_EXPECT_EQ(test, link_bss->use_for, 0);
+ KUNIT_EXPECT_EQ(test, link_bss->cannot_use_reasons,
+ NL80211_BSS_CANNOT_USE_NSTR_NONPRIMARY);
+ KUNIT_EXPECT_NULL(test,
+ cfg80211_get_bss(wiphy, NULL, sta_prof.bssid,
+ NULL, 0,
+ IEEE80211_BSS_TYPE_ANY,
+ IEEE80211_PRIVACY_ANY));
+ } else {
+ KUNIT_EXPECT_EQ(test, link_bss->use_for,
+ NL80211_BSS_USE_FOR_ALL);
+ KUNIT_EXPECT_EQ(test, link_bss->cannot_use_reasons, 0);
+ }
+
rcu_read_lock();
ies = rcu_dereference(link_bss->ies);
KUNIT_EXPECT_NOT_NULL(test, ies);
KUNIT_EXPECT_EQ(test, ies->tsf, tsf + le64_to_cpu(sta_prof.tsf_offset));
/* Resulting length should be:
* SSID (inherited) + RNR (inherited) + vendor element(s) +
+ * operating class (if requested) +
+ * generated RNR (if MLD ID == 0 and not NSTR) +
* MLE common info + MLE header and control
*/
if (params->sta_prof_vendor_elems)
KUNIT_EXPECT_EQ(test, ies->len,
- 6 + 2 + sizeof(rnr) + 2 + 160 + 2 + 165 +
+ 6 + 2 + rnr_len + 2 + 160 + 2 + 165 +
+ (params->include_oper_class ? 3 : 0) +
+ (!params->mld_id && !params->nstr ? 22 : 0) +
mle_basic_common_info.var_len + 5);
else
KUNIT_EXPECT_EQ(test, ies->len,
- 6 + 2 + sizeof(rnr) + 2 + 155 +
+ 6 + 2 + rnr_len + 2 + 155 +
+ (params->include_oper_class ? 3 : 0) +
+ (!params->mld_id && !params->nstr ? 22 : 0) +
mle_basic_common_info.var_len + 5);
rcu_read_unlock();
@@ -598,6 +672,172 @@ static void test_inform_bss_ml_sta(struct kunit *test)
cfg80211_put_bss(wiphy, link_bss);
}
+static struct cfg80211_parse_colocated_ap_case {
+ const char *desc;
+ u8 op_class;
+ u8 channel;
+ struct ieee80211_neighbor_ap_info info;
+ union {
+ struct ieee80211_tbtt_info_ge_11 tbtt_long;
+ struct ieee80211_tbtt_info_7_8_9 tbtt_short;
+ };
+ bool add_junk;
+ bool same_ssid;
+ bool valid;
+} cfg80211_parse_colocated_ap_cases[] = {
+ {
+ .desc = "wrong_band",
+ .info.op_class = 81,
+ .info.channel = 11,
+ .tbtt_long = {
+ .bssid = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
+ .bss_params = IEEE80211_RNR_TBTT_PARAMS_COLOC_AP,
+ },
+ .valid = false,
+ },
+ {
+ .desc = "wrong_type",
+ /* IEEE80211_AP_INFO_TBTT_HDR_TYPE is in the least significant bits */
+ .info.tbtt_info_hdr = IEEE80211_TBTT_INFO_TYPE_MLD,
+ .tbtt_long = {
+ .bssid = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
+ .bss_params = IEEE80211_RNR_TBTT_PARAMS_COLOC_AP,
+ },
+ .valid = false,
+ },
+ {
+ .desc = "colocated_invalid_len_short",
+ .info.tbtt_info_len = 6,
+ .tbtt_short = {
+ .bssid = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
+ .bss_params = IEEE80211_RNR_TBTT_PARAMS_COLOC_AP |
+ IEEE80211_RNR_TBTT_PARAMS_SAME_SSID,
+ },
+ .valid = false,
+ },
+ {
+ .desc = "colocated_invalid_len_short_mld",
+ .info.tbtt_info_len = 10,
+ .tbtt_long = {
+ .bssid = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
+ .bss_params = IEEE80211_RNR_TBTT_PARAMS_COLOC_AP,
+ },
+ .valid = false,
+ },
+ {
+ .desc = "colocated_non_mld",
+ .info.tbtt_info_len = sizeof(struct ieee80211_tbtt_info_7_8_9),
+ .tbtt_short = {
+ .bssid = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
+ .bss_params = IEEE80211_RNR_TBTT_PARAMS_COLOC_AP |
+ IEEE80211_RNR_TBTT_PARAMS_SAME_SSID,
+ },
+ .same_ssid = true,
+ .valid = true,
+ },
+ {
+ .desc = "colocated_non_mld_invalid_bssid",
+ .info.tbtt_info_len = sizeof(struct ieee80211_tbtt_info_7_8_9),
+ .tbtt_short = {
+ .bssid = { 0xff, 0x11, 0x22, 0x33, 0x44, 0x55 },
+ .bss_params = IEEE80211_RNR_TBTT_PARAMS_COLOC_AP |
+ IEEE80211_RNR_TBTT_PARAMS_SAME_SSID,
+ },
+ .same_ssid = true,
+ .valid = false,
+ },
+ {
+ .desc = "colocated_mld",
+ .tbtt_long = {
+ .bssid = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
+ .bss_params = IEEE80211_RNR_TBTT_PARAMS_COLOC_AP,
+ },
+ .valid = true,
+ },
+ {
+		.desc = "colocated_mld_junk",
+ .tbtt_long = {
+ .bssid = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
+ .bss_params = IEEE80211_RNR_TBTT_PARAMS_COLOC_AP,
+ },
+ .add_junk = true,
+ .valid = false,
+ },
+ {
+ .desc = "colocated_disabled_mld",
+ .tbtt_long = {
+ .bssid = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
+ .bss_params = IEEE80211_RNR_TBTT_PARAMS_COLOC_AP,
+ .mld_params.params = cpu_to_le16(IEEE80211_RNR_MLD_PARAMS_DISABLED_LINK),
+ },
+ .valid = false,
+ },
+};
+KUNIT_ARRAY_PARAM_DESC(cfg80211_parse_colocated_ap, cfg80211_parse_colocated_ap_cases, desc)
+
+static void test_cfg80211_parse_colocated_ap(struct kunit *test)
+{
+ const struct cfg80211_parse_colocated_ap_case *params = test->param_value;
+ struct sk_buff *input = kunit_zalloc_skb(test, 1024, GFP_KERNEL);
+ struct cfg80211_bss_ies *ies;
+ struct ieee80211_neighbor_ap_info info;
+ LIST_HEAD(coloc_ap_list);
+ int count;
+
+ KUNIT_ASSERT_NOT_NULL(test, input);
+
+ info = params->info;
+
+ /* Reasonable values for a colocated AP */
+ if (!info.tbtt_info_len)
+ info.tbtt_info_len = sizeof(params->tbtt_long);
+ if (!info.op_class)
+ info.op_class = 131;
+ if (!info.channel)
+ info.channel = 33;
+	/* Zero is the correct default for .tbtt_info_hdr (one entry, TBTT type) */
+
+ skb_put_u8(input, WLAN_EID_SSID);
+ skb_put_u8(input, 4);
+ skb_put_data(input, "TEST", 4);
+
+ skb_put_u8(input, WLAN_EID_REDUCED_NEIGHBOR_REPORT);
+ skb_put_u8(input, sizeof(info) + info.tbtt_info_len + (params->add_junk ? 3 : 0));
+ skb_put_data(input, &info, sizeof(info));
+ skb_put_data(input, &params->tbtt_long, info.tbtt_info_len);
+
+ if (params->add_junk)
+ skb_put_data(input, "123", 3);
+
+ ies = kunit_kzalloc(test, struct_size(ies, data, input->len), GFP_KERNEL);
+ ies->len = input->len;
+ memcpy(ies->data, input->data, input->len);
+
+ count = cfg80211_parse_colocated_ap(ies, &coloc_ap_list);
+
+ KUNIT_EXPECT_EQ(test, count, params->valid);
+ KUNIT_EXPECT_EQ(test, list_count_nodes(&coloc_ap_list), params->valid);
+
+ if (params->valid && !list_empty(&coloc_ap_list)) {
+ struct cfg80211_colocated_ap *ap;
+
+ ap = list_first_entry(&coloc_ap_list, typeof(*ap), list);
+ if (info.tbtt_info_len <= sizeof(params->tbtt_short))
+ KUNIT_EXPECT_MEMEQ(test, ap->bssid, params->tbtt_short.bssid, ETH_ALEN);
+ else
+ KUNIT_EXPECT_MEMEQ(test, ap->bssid, params->tbtt_long.bssid, ETH_ALEN);
+
+ if (params->same_ssid) {
+ KUNIT_EXPECT_EQ(test, ap->ssid_len, 4);
+ KUNIT_EXPECT_MEMEQ(test, ap->ssid, "TEST", 4);
+ } else {
+ KUNIT_EXPECT_EQ(test, ap->ssid_len, 0);
+ }
+ }
+
+ cfg80211_free_coloc_ap_list(&coloc_ap_list);
+}
+
static struct kunit_case gen_new_ie_test_cases[] = {
KUNIT_CASE_PARAM(test_gen_new_ie, gen_new_ie_gen_params),
KUNIT_CASE(test_gen_new_ie_malformed),
@@ -623,3 +863,16 @@ static struct kunit_suite inform_bss = {
};
kunit_test_suite(inform_bss);
+
+static struct kunit_case scan_6ghz_cases[] = {
+ KUNIT_CASE_PARAM(test_cfg80211_parse_colocated_ap,
+ cfg80211_parse_colocated_ap_gen_params),
+ {}
+};
+
+static struct kunit_suite scan_6ghz = {
+ .name = "cfg80211-scan-6ghz",
+ .test_cases = scan_6ghz_cases,
+};
+
+kunit_test_suite(scan_6ghz);
diff --git a/net/wireless/trace.h b/net/wireless/trace.h
index 1f374c8a17a5..e039e66ab377 100644
--- a/net/wireless/trace.h
+++ b/net/wireless/trace.h
@@ -1,4 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Portions of this file
+ * Copyright(c) 2016-2017 Intel Deutschland GmbH
+ * Copyright (C) 2018, 2020-2024 Intel Corporation
+ */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM cfg80211
@@ -135,7 +140,8 @@
__field(u32, width) \
__field(u32, center_freq1) \
__field(u32, freq1_offset) \
- __field(u32, center_freq2)
+ __field(u32, center_freq2) \
+ __field(u16, punctured)
#define CHAN_DEF_ASSIGN(chandef) \
do { \
if ((chandef) && (chandef)->chan) { \
@@ -148,6 +154,7 @@
__entry->center_freq1 = (chandef)->center_freq1;\
__entry->freq1_offset = (chandef)->freq1_offset;\
__entry->center_freq2 = (chandef)->center_freq2;\
+ __entry->punctured = (chandef)->punctured; \
} else { \
__entry->band = 0; \
__entry->control_freq = 0; \
@@ -156,14 +163,15 @@
__entry->center_freq1 = 0; \
__entry->freq1_offset = 0; \
__entry->center_freq2 = 0; \
+ __entry->punctured = 0; \
} \
} while (0)
#define CHAN_DEF_PR_FMT \
- "band: %d, control freq: %u.%03u, width: %d, cf1: %u.%03u, cf2: %u"
+ "band: %d, control freq: %u.%03u, width: %d, cf1: %u.%03u, cf2: %u, punct: 0x%x"
#define CHAN_DEF_PR_ARG __entry->band, __entry->control_freq, \
__entry->freq_offset, __entry->width, \
__entry->center_freq1, __entry->freq1_offset, \
- __entry->center_freq2
+ __entry->center_freq2, __entry->punctured
#define FILS_AAD_ASSIGN(fa) \
do { \
@@ -810,8 +818,8 @@ DECLARE_EVENT_CLASS(station_add_change,
params->link_sta_params.opmode_notif_used;
),
TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", station mac: %pM"
- ", station flags mask: %u, station flags set: %u, "
- "station modify mask: %u, listen interval: %d, aid: %u, "
+ ", station flags mask: 0x%x, station flags set: 0x%x, "
+ "station modify mask: 0x%x, listen interval: %d, aid: %u, "
"plink action: %u, plink state: %u, uapsd queues: %u, vlan:%s",
WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->sta_mac,
__entry->sta_flags_mask, __entry->sta_flags_set,
@@ -859,6 +867,7 @@ DECLARE_EVENT_CLASS(station_del,
MAC_ENTRY(sta_mac)
__field(u8, subtype)
__field(u16, reason_code)
+ __field(int, link_id)
),
TP_fast_assign(
WIPHY_ASSIGN;
@@ -866,11 +875,13 @@ DECLARE_EVENT_CLASS(station_del,
MAC_ASSIGN(sta_mac, params->mac);
__entry->subtype = params->subtype;
__entry->reason_code = params->reason_code;
+ __entry->link_id = params->link_id;
),
TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", station mac: %pM"
- ", subtype: %u, reason_code: %u",
+ ", subtype: %u, reason_code: %u, link_id: %d",
WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->sta_mac,
- __entry->subtype, __entry->reason_code)
+ __entry->subtype, __entry->reason_code,
+ __entry->link_id)
);
DEFINE_EVENT(station_del, rdev_del_station,
@@ -1064,7 +1075,7 @@ TRACE_EVENT(rdev_return_int_mpath_info,
),
TP_printk(WIPHY_PR_FMT ", returned %d. mpath info - generation: %d, "
"filled: %u, frame qlen: %u, sn: %u, metric: %u, exptime: %u,"
- " discovery timeout: %u, discovery retries: %u, flags: %u",
+ " discovery timeout: %u, discovery retries: %u, flags: 0x%x",
WIPHY_PR_ARG, __entry->ret, __entry->generation,
__entry->filled, __entry->frame_qlen, __entry->sn,
__entry->metric, __entry->exptime, __entry->discovery_timeout,
@@ -1306,7 +1317,7 @@ TRACE_EVENT(rdev_assoc,
req->fils_nonces, 2 * FILS_NONCE_LEN);
),
TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", bssid: %pM"
- ", previous bssid: %pM, use mfp: %s, flags: %u",
+ ", previous bssid: %pM, use mfp: %s, flags: 0x%x",
WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->bssid,
__entry->prev_bssid, BOOL_TO_STR(__entry->use_mfp),
__entry->flags)
@@ -1428,7 +1439,7 @@ TRACE_EVENT(rdev_connect,
),
TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", bssid: %pM"
", ssid: %s, auth type: %d, privacy: %s, wpa versions: %u, "
- "flags: %u, previous bssid: %pM",
+ "flags: 0x%x, previous bssid: %pM",
WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->bssid, __entry->ssid,
__entry->auth_type, BOOL_TO_STR(__entry->privacy),
__entry->wpa_versions, __entry->flags, __entry->prev_bssid)
@@ -2324,6 +2335,7 @@ TRACE_EVENT(rdev_channel_switch,
__field(u8, count)
__dynamic_array(u16, bcn_ofs, params->n_counter_offsets_beacon)
__dynamic_array(u16, pres_ofs, params->n_counter_offsets_presp)
+ __field(u8, link_id)
),
TP_fast_assign(
WIPHY_ASSIGN;
@@ -2341,11 +2353,13 @@ TRACE_EVENT(rdev_channel_switch,
memcpy(__get_dynamic_array(pres_ofs),
params->counter_offsets_presp,
params->n_counter_offsets_presp * sizeof(u16));
+ __entry->link_id = params->link_id;
),
TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " CHAN_DEF_PR_FMT
- ", block_tx: %d, count: %u, radar_required: %d",
+ ", block_tx: %d, count: %u, radar_required: %d, link_id: %d",
WIPHY_PR_ARG, NETDEV_PR_ARG, CHAN_DEF_PR_ARG,
- __entry->block_tx, __entry->count, __entry->radar_required)
+ __entry->block_tx, __entry->count, __entry->radar_required,
+ __entry->link_id)
);
TRACE_EVENT(rdev_set_qos_map,
@@ -3267,47 +3281,39 @@ TRACE_EVENT(cfg80211_chandef_dfs_required,
TRACE_EVENT(cfg80211_ch_switch_notify,
TP_PROTO(struct net_device *netdev,
struct cfg80211_chan_def *chandef,
- unsigned int link_id,
- u16 punct_bitmap),
- TP_ARGS(netdev, chandef, link_id, punct_bitmap),
+ unsigned int link_id),
+ TP_ARGS(netdev, chandef, link_id),
TP_STRUCT__entry(
NETDEV_ENTRY
CHAN_DEF_ENTRY
__field(unsigned int, link_id)
- __field(u16, punct_bitmap)
),
TP_fast_assign(
NETDEV_ASSIGN;
CHAN_DEF_ASSIGN(chandef);
__entry->link_id = link_id;
- __entry->punct_bitmap = punct_bitmap;
),
- TP_printk(NETDEV_PR_FMT ", " CHAN_DEF_PR_FMT ", link:%d, punct_bitmap:%u",
- NETDEV_PR_ARG, CHAN_DEF_PR_ARG, __entry->link_id,
- __entry->punct_bitmap)
+ TP_printk(NETDEV_PR_FMT ", " CHAN_DEF_PR_FMT ", link:%d",
+ NETDEV_PR_ARG, CHAN_DEF_PR_ARG, __entry->link_id)
);
TRACE_EVENT(cfg80211_ch_switch_started_notify,
TP_PROTO(struct net_device *netdev,
struct cfg80211_chan_def *chandef,
- unsigned int link_id,
- u16 punct_bitmap),
- TP_ARGS(netdev, chandef, link_id, punct_bitmap),
+ unsigned int link_id),
+ TP_ARGS(netdev, chandef, link_id),
TP_STRUCT__entry(
NETDEV_ENTRY
CHAN_DEF_ENTRY
__field(unsigned int, link_id)
- __field(u16, punct_bitmap)
),
TP_fast_assign(
NETDEV_ASSIGN;
CHAN_DEF_ASSIGN(chandef);
__entry->link_id = link_id;
- __entry->punct_bitmap = punct_bitmap;
),
- TP_printk(NETDEV_PR_FMT ", " CHAN_DEF_PR_FMT ", link:%d, punct_bitmap:%u",
- NETDEV_PR_ARG, CHAN_DEF_PR_ARG, __entry->link_id,
- __entry->punct_bitmap)
+ TP_printk(NETDEV_PR_FMT ", " CHAN_DEF_PR_FMT ", link:%d",
+ NETDEV_PR_ARG, CHAN_DEF_PR_ARG, __entry->link_id)
);
TRACE_EVENT(cfg80211_radar_event,
diff --git a/net/wireless/util.c b/net/wireless/util.c
index d1ce3bee2797..2bde8a354631 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -791,15 +791,19 @@ ieee80211_amsdu_subframe_length(void *field, u8 mesh_flags, u8 hdr_type)
bool ieee80211_is_valid_amsdu(struct sk_buff *skb, u8 mesh_hdr)
{
- int offset = 0, remaining, subframe_len, padding;
+ int offset = 0, subframe_len, padding;
for (offset = 0; offset < skb->len; offset += subframe_len + padding) {
+ int remaining = skb->len - offset;
struct {
__be16 len;
u8 mesh_flags;
} hdr;
u16 len;
+ if (sizeof(hdr) > remaining)
+ return false;
+
if (skb_copy_bits(skb, offset + 2 * ETH_ALEN, &hdr, sizeof(hdr)) < 0)
return false;
@@ -807,7 +811,6 @@ bool ieee80211_is_valid_amsdu(struct sk_buff *skb, u8 mesh_hdr)
mesh_hdr);
subframe_len = sizeof(struct ethhdr) + len;
padding = (4 - subframe_len) & 0x3;
- remaining = skb->len - offset;
if (subframe_len > remaining)
return false;
@@ -825,7 +828,7 @@ void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
{
unsigned int hlen = ALIGN(extra_headroom, 4);
struct sk_buff *frame = NULL;
- int offset = 0, remaining;
+ int offset = 0;
struct {
struct ethhdr eth;
uint8_t flags;
@@ -839,10 +842,14 @@ void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
copy_len = sizeof(hdr);
while (!last) {
+ int remaining = skb->len - offset;
unsigned int subframe_len;
int len, mesh_len = 0;
u8 padding;
+ if (copy_len > remaining)
+ goto purge;
+
skb_copy_bits(skb, offset, &hdr, copy_len);
if (iftype == NL80211_IFTYPE_MESH_POINT)
mesh_len = __ieee80211_get_mesh_hdrlen(hdr.flags);
@@ -852,7 +859,6 @@ void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
padding = (4 - subframe_len) & 0x3;
/* the last MSDU has no padding */
- remaining = skb->len - offset;
if (subframe_len > remaining)
goto purge;
/* mitigate A-MSDU aggregation injection attacks */
@@ -2073,6 +2079,82 @@ bool ieee80211_operating_class_to_band(u8 operating_class,
}
EXPORT_SYMBOL(ieee80211_operating_class_to_band);
+bool ieee80211_operating_class_to_chandef(u8 operating_class,
+ struct ieee80211_channel *chan,
+ struct cfg80211_chan_def *chandef)
+{
+ u32 control_freq, offset = 0;
+ enum nl80211_band band;
+
+ if (!ieee80211_operating_class_to_band(operating_class, &band) ||
+ !chan || band != chan->band)
+ return false;
+
+ control_freq = chan->center_freq;
+ chandef->chan = chan;
+
+ if (control_freq >= 5955)
+ offset = control_freq - 5955;
+ else if (control_freq >= 5745)
+ offset = control_freq - 5745;
+ else if (control_freq >= 5180)
+ offset = control_freq - 5180;
+ offset /= 20;
+
+ switch (operating_class) {
+ case 81: /* 2 GHz band; 20 MHz; channels 1..13 */
+ case 82: /* 2 GHz band; 20 MHz; channel 14 */
+ case 115: /* 5 GHz band; 20 MHz; channels 36,40,44,48 */
+ case 118: /* 5 GHz band; 20 MHz; channels 52,56,60,64 */
+ case 121: /* 5 GHz band; 20 MHz; channels 100..144 */
+ case 124: /* 5 GHz band; 20 MHz; channels 149,153,157,161 */
+ case 125: /* 5 GHz band; 20 MHz; channels 149..177 */
+	case 131: /* 6 GHz band; 20 MHz; channels 1..233 */
+ case 136: /* 6 GHz band; 20 MHz; channel 2 */
+ chandef->center_freq1 = control_freq;
+ chandef->width = NL80211_CHAN_WIDTH_20;
+ return true;
+ case 83: /* 2 GHz band; 40 MHz; channels 1..9 */
+ case 116: /* 5 GHz band; 40 MHz; channels 36,44 */
+ case 119: /* 5 GHz band; 40 MHz; channels 52,60 */
+ case 122: /* 5 GHz band; 40 MHz; channels 100,108,116,124,132,140 */
+ case 126: /* 5 GHz band; 40 MHz; channels 149,157,165,173 */
+ chandef->center_freq1 = control_freq + 10;
+ chandef->width = NL80211_CHAN_WIDTH_40;
+ return true;
+ case 84: /* 2 GHz band; 40 MHz; channels 5..13 */
+ case 117: /* 5 GHz band; 40 MHz; channels 40,48 */
+ case 120: /* 5 GHz band; 40 MHz; channels 56,64 */
+ case 123: /* 5 GHz band; 40 MHz; channels 104,112,120,128,136,144 */
+ case 127: /* 5 GHz band; 40 MHz; channels 153,161,169,177 */
+ chandef->center_freq1 = control_freq - 10;
+ chandef->width = NL80211_CHAN_WIDTH_40;
+ return true;
+	case 132: /* 6 GHz band; 40 MHz; channels 1,5,..,229 */
+ chandef->center_freq1 = control_freq + 10 - (offset & 1) * 20;
+ chandef->width = NL80211_CHAN_WIDTH_40;
+ return true;
+ case 128: /* 5 GHz band; 80 MHz; channels 36..64,100..144,149..177 */
+ case 133: /* 6 GHz band; 80 MHz; channels 1,5,..,229 */
+ chandef->center_freq1 = control_freq + 30 - (offset & 3) * 20;
+ chandef->width = NL80211_CHAN_WIDTH_80;
+ return true;
+ case 129: /* 5 GHz band; 160 MHz; channels 36..64,100..144,149..177 */
+ case 134: /* 6 GHz band; 160 MHz; channels 1,5,..,229 */
+ chandef->center_freq1 = control_freq + 70 - (offset & 7) * 20;
+ chandef->width = NL80211_CHAN_WIDTH_160;
+ return true;
+ case 130: /* 5 GHz band; 80+80 MHz; channels 36..64,100..144,149..177 */
+ case 135: /* 6 GHz band; 80+80 MHz; channels 1,5,..,229 */
+ /* The center_freq2 of 80+80 MHz is unknown */
+ case 137: /* 6 GHz band; 320 MHz; channels 1,5,..,229 */
+ /* 320-1 or 320-2 channelization is unknown */
+ default:
+ return false;
+ }
+}
+EXPORT_SYMBOL(ieee80211_operating_class_to_chandef);
+
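
The per-width center_freq1 arithmetic above is one formula in disguise; a worked restatement, derived from the cases rather than quoted from the spec:

/* With offset = (control_freq - band_anchor) / 20 (anchor 5955, 5745
 * or 5180 MHz, as computed above) and N = bandwidth / 20:
 *
 *	center_freq1 = control_freq + (N - 1) * 10 - (offset % N) * 20
 *
 * Worked check for 160 MHz (N = 8), 6 GHz channel 33 (6115 MHz):
 * offset = (6115 - 5955) / 20 = 8, so
 * center_freq1 = 6115 + 70 - (8 % 8) * 20 = 6185 MHz,
 * matching the "control_freq + 70 - (offset & 7) * 20" case above.
 */
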
bool ieee80211_chandef_to_operating_class(struct cfg80211_chan_def *chandef,
u8 *op_class)
{
diff --git a/net/x25/Kconfig b/net/x25/Kconfig
index 68729aa3a5d5..dc72302cbd07 100644
--- a/net/x25/Kconfig
+++ b/net/x25/Kconfig
@@ -17,8 +17,6 @@ config X25
if you want that) and the lower level data link layer protocol LAPB
(say Y to "LAPB Data Link Driver" below if you want that).
- You can read more about X.25 at <https://www.sangoma.com/tutorials/x25/> and
- <http://docwiki.cisco.com/wiki/X.25>.
Information about X.25 for Linux is contained in the files
<file:Documentation/networking/x25.rst> and
<file:Documentation/networking/x25-iface.rst>.
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index f7a7c7798c3b..d18d51412cc0 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -460,12 +460,12 @@ static int x25_getsockopt(struct socket *sock, int level, int optname,
if (get_user(len, optlen))
goto out;
- len = min_t(unsigned int, len, sizeof(int));
-
rc = -EINVAL;
if (len < 0)
goto out;
+ len = min_t(unsigned int, len, sizeof(int));
+
rc = -EFAULT;
if (put_user(len, optlen))
goto out;
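
The reorder matters because len is a signed int: clamping first through min_t(unsigned int, ...) converts a negative length to a huge unsigned value, so the clamp silently yields sizeof(int) and the len < 0 check can never fire. A standalone sketch of the trap:

#include <stdio.h>

/* min_u mimics min_t(unsigned int, ...). */
#define min_u(a, b) ((unsigned int)(a) < (unsigned int)(b) ? (a) : (b))

int main(void)
{
	int len = -1;

	/* Old order: the unsigned compare sees 0xffffffff, the "clamp"
	 * returns sizeof(int), and the negative value is laundered into
	 * an apparently valid length. */
	int clamped = (int)min_u(len, sizeof(int));

	printf("clamped=%d, (clamped < 0) is %d\n", clamped, clamped < 0);
	return 0;
}
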
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index b78c0e095e22..3404d076a8a3 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -313,10 +313,13 @@ static bool xsk_is_bound(struct xdp_sock *xs)
static int xsk_rcv_check(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
+ struct net_device *dev = xdp->rxq->dev;
+ u32 qid = xdp->rxq->queue_index;
+
if (!xsk_is_bound(xs))
return -ENXIO;
- if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
+ if (!dev->_rx[qid].pool || xs->umem != dev->_rx[qid].pool->umem)
return -EINVAL;
if (len > xsk_pool_get_rx_frame_size(xs->pool) && !xs->sg) {
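
One way to read the rewritten condition: the redirect target no longer has to be the exact socket bound to (dev, queue); any socket whose umem matches the pool bound to the receiving queue is accepted. A comment sketch of the two cases; this is my reading of the check, not a quoted statement of intent:

/* dev->_rx[qid].pool   pool bound to the queue the frame arrived on
 * xs->umem             umem of the socket chosen as redirect target
 *
 * accepted: any socket sharing the umem that is bound to (dev, qid),
 *           not only the socket that performed the bind itself
 * rejected: no pool bound to the receiving queue, or the target
 *           socket's umem differs from the bound pool's umem
 */
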
diff --git a/net/xdp/xsk_diag.c b/net/xdp/xsk_diag.c
index 9f8955367275..09dcea0cbbed 100644
--- a/net/xdp/xsk_diag.c
+++ b/net/xdp/xsk_diag.c
@@ -194,6 +194,7 @@ static int xsk_diag_handler_dump(struct sk_buff *nlskb, struct nlmsghdr *hdr)
}
static const struct sock_diag_handler xsk_diag_handler = {
+ .owner = THIS_MODULE,
.family = AF_XDP,
.dump = xsk_diag_handler_dump,
};
diff --git a/net/xfrm/espintcp.c b/net/xfrm/espintcp.c
index d3b3f9e720b3..fe82e2d07300 100644
--- a/net/xfrm/espintcp.c
+++ b/net/xfrm/espintcp.c
@@ -10,6 +10,7 @@
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6_stubs.h>
#endif
+#include <net/hotdata.h>
static void handle_nonesp(struct espintcp_ctx *ctx, struct sk_buff *skb,
struct sock *sk)
@@ -169,7 +170,8 @@ int espintcp_queue_out(struct sock *sk, struct sk_buff *skb)
{
struct espintcp_ctx *ctx = espintcp_getctx(sk);
- if (skb_queue_len(&ctx->out_queue) >= READ_ONCE(netdev_max_backlog))
+ if (skb_queue_len(&ctx->out_queue) >=
+ READ_ONCE(net_hotdata.max_backlog))
return -ENOBUFS;
__skb_queue_tail(&ctx->out_queue, skb);
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index bd4ce21d76d7..161f535c8b94 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -21,6 +21,7 @@
#include <net/ip_tunnels.h>
#include <net/ip6_tunnel.h>
#include <net/dst_metadata.h>
+#include <net/hotdata.h>
#include "xfrm_inout.h"
@@ -764,7 +765,7 @@ int xfrm_trans_queue_net(struct net *net, struct sk_buff *skb,
trans = this_cpu_ptr(&xfrm_trans_tasklet);
- if (skb_queue_len(&trans->queue) >= READ_ONCE(netdev_max_backlog))
+ if (skb_queue_len(&trans->queue) >= READ_ONCE(net_hotdata.max_backlog))
return -ENOBUFS;
BUILD_BUG_ON(sizeof(struct xfrm_trans_cb) > sizeof(skb->cb));
diff --git a/net/xfrm/xfrm_interface_bpf.c b/net/xfrm/xfrm_interface_bpf.c
index 7d5e920141e9..5ea15037ebd1 100644
--- a/net/xfrm/xfrm_interface_bpf.c
+++ b/net/xfrm/xfrm_interface_bpf.c
@@ -93,10 +93,10 @@ __bpf_kfunc int bpf_skb_set_xfrm_info(struct __sk_buff *skb_ctx, const struct bp
__bpf_kfunc_end_defs();
-BTF_SET8_START(xfrm_ifc_kfunc_set)
+BTF_KFUNCS_START(xfrm_ifc_kfunc_set)
BTF_ID_FLAGS(func, bpf_skb_get_xfrm_info)
BTF_ID_FLAGS(func, bpf_skb_set_xfrm_info)
-BTF_SET8_END(xfrm_ifc_kfunc_set)
+BTF_KFUNCS_END(xfrm_ifc_kfunc_set)
static const struct btf_kfunc_id_set xfrm_interface_kfunc_set = {
.owner = THIS_MODULE,
diff --git a/net/xfrm/xfrm_interface_core.c b/net/xfrm/xfrm_interface_core.c
index 21d50d75c260..4df5c06e3ece 100644
--- a/net/xfrm/xfrm_interface_core.c
+++ b/net/xfrm/xfrm_interface_core.c
@@ -240,7 +240,6 @@ static void xfrmi_dev_free(struct net_device *dev)
struct xfrm_if *xi = netdev_priv(dev);
gro_cells_destroy(&xi->gro_cells);
- free_percpu(dev->tstats);
}
static int xfrmi_create(struct net_device *dev)
@@ -727,7 +726,7 @@ static int xfrmi_get_iflink(const struct net_device *dev)
{
struct xfrm_if *xi = netdev_priv(dev);
- return xi->p.link;
+ return READ_ONCE(xi->p.link);
}
static const struct net_device_ops xfrmi_netdev_ops = {
@@ -749,6 +748,7 @@ static void xfrmi_dev_setup(struct net_device *dev)
dev->flags = IFF_NOARP;
dev->needs_free_netdev = true;
dev->priv_destructor = xfrmi_dev_free;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
netif_keep_dst(dev);
eth_broadcast_addr(dev->broadcast);
@@ -765,15 +765,9 @@ static int xfrmi_dev_init(struct net_device *dev)
struct net_device *phydev = __dev_get_by_index(xi->net, xi->p.link);
int err;
- dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
- if (!dev->tstats)
- return -ENOMEM;
-
err = gro_cells_init(&xi->gro_cells, dev);
- if (err) {
- free_percpu(dev->tstats);
+ if (err)
return err;
- }
dev->features |= NETIF_F_LLTX;
dev->features |= XFRMI_FEATURES;
@@ -957,12 +951,12 @@ static struct rtnl_link_ops xfrmi_link_ops __read_mostly = {
.get_link_net = xfrmi_get_link_net,
};
-static void __net_exit xfrmi_exit_batch_net(struct list_head *net_exit_list)
+static void __net_exit xfrmi_exit_batch_rtnl(struct list_head *net_exit_list,
+ struct list_head *dev_to_kill)
{
struct net *net;
- LIST_HEAD(list);
- rtnl_lock();
+ ASSERT_RTNL();
list_for_each_entry(net, net_exit_list, exit_list) {
struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);
struct xfrm_if __rcu **xip;
@@ -973,18 +967,16 @@ static void __net_exit xfrmi_exit_batch_net(struct list_head *net_exit_list)
for (xip = &xfrmn->xfrmi[i];
(xi = rtnl_dereference(*xip)) != NULL;
xip = &xi->next)
- unregister_netdevice_queue(xi->dev, &list);
+ unregister_netdevice_queue(xi->dev, dev_to_kill);
}
xi = rtnl_dereference(xfrmn->collect_md_xfrmi);
if (xi)
- unregister_netdevice_queue(xi->dev, &list);
+ unregister_netdevice_queue(xi->dev, dev_to_kill);
}
- unregister_netdevice_many(&list);
- rtnl_unlock();
}
static struct pernet_operations xfrmi_net_ops = {
- .exit_batch = xfrmi_exit_batch_net,
+ .exit_batch_rtnl = xfrmi_exit_batch_rtnl,
.id = &xfrmi_net_id,
.size = sizeof(struct xfrmi_net),
};
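
The conversion is an instance of the newer pernet teardown pattern: .exit_batch_rtnl runs with rtnl_lock already held by the core, and the callback only queues devices on dev_to_kill; the core then runs a single batched unregister for everything. A minimal skeleton, with my_net_id and struct my_priv hypothetical:

static unsigned int my_net_id;

struct my_priv {
	struct net_device *dev;		/* one device per netns, say */
};

static void __net_exit my_exit_batch_rtnl(struct list_head *net_exit_list,
					  struct list_head *dev_to_kill)
{
	struct net *net;

	ASSERT_RTNL();	/* the core already holds rtnl_lock */
	list_for_each_entry(net, net_exit_list, exit_list) {
		struct my_priv *p = net_generic(net, my_net_id);

		/* queue only; the core calls unregister_netdevice_many()
		 * once for all of dev_to_kill */
		unregister_netdevice_queue(p->dev, dev_to_kill);
	}
}

static struct pernet_operations my_net_ops = {
	.exit_batch_rtnl = my_exit_batch_rtnl,
	.id = &my_net_id,
	.size = sizeof(struct my_priv),
};
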
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index da6ecc6b3e15..6affe5cd85d8 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -29,6 +29,7 @@
#include <linux/audit.h>
#include <linux/rhashtable.h>
#include <linux/if_tunnel.h>
+#include <linux/icmp.h>
#include <net/dst.h>
#include <net/flow.h>
#include <net/inet_ecn.h>
@@ -3505,6 +3506,128 @@ static inline int secpath_has_nontransport(const struct sec_path *sp, int k, int
return 0;
}
+static bool icmp_err_packet(const struct flowi *fl, unsigned short family)
+{
+ const struct flowi4 *fl4 = &fl->u.ip4;
+
+ if (family == AF_INET &&
+ fl4->flowi4_proto == IPPROTO_ICMP &&
+ (fl4->fl4_icmp_type == ICMP_DEST_UNREACH ||
+ fl4->fl4_icmp_type == ICMP_TIME_EXCEEDED))
+ return true;
+
+#if IS_ENABLED(CONFIG_IPV6)
+ if (family == AF_INET6) {
+ const struct flowi6 *fl6 = &fl->u.ip6;
+
+ if (fl6->flowi6_proto == IPPROTO_ICMPV6 &&
+ (fl6->fl6_icmp_type == ICMPV6_DEST_UNREACH ||
+ fl6->fl6_icmp_type == ICMPV6_PKT_TOOBIG ||
+ fl6->fl6_icmp_type == ICMPV6_TIME_EXCEED))
+ return true;
+ }
+#endif
+ return false;
+}
+
+static bool xfrm_icmp_flow_decode(struct sk_buff *skb, unsigned short family,
+ const struct flowi *fl, struct flowi *fl1)
+{
+ bool ret = true;
+ struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
+ int hl = family == AF_INET ? (sizeof(struct iphdr) + sizeof(struct icmphdr)) :
+ (sizeof(struct ipv6hdr) + sizeof(struct icmp6hdr));
+
+ if (!newskb)
+ return true;
+
+ if (!pskb_pull(newskb, hl))
+ goto out;
+
+ skb_reset_network_header(newskb);
+
+ if (xfrm_decode_session_reverse(dev_net(skb->dev), newskb, fl1, family) < 0)
+ goto out;
+
+ fl1->flowi_oif = fl->flowi_oif;
+ fl1->flowi_mark = fl->flowi_mark;
+ fl1->flowi_tos = fl->flowi_tos;
+ nf_nat_decode_session(newskb, fl1, family);
+ ret = false;
+
+out:
+ consume_skb(newskb);
+ return ret;
+}
+
+static bool xfrm_selector_inner_icmp_match(struct sk_buff *skb, unsigned short family,
+ const struct xfrm_selector *sel,
+ const struct flowi *fl)
+{
+ bool ret = false;
+
+ if (icmp_err_packet(fl, family)) {
+ struct flowi fl1;
+
+ if (xfrm_icmp_flow_decode(skb, family, fl, &fl1))
+ return ret;
+
+ ret = xfrm_selector_match(sel, &fl1, family);
+ }
+
+ return ret;
+}
+
+static inline struct xfrm_policy *
+xfrm_in_fwd_icmp(struct sk_buff *skb,
+ const struct flowi *fl, unsigned short family,
+ u32 if_id)
+{
+ struct xfrm_policy *pol = NULL;
+
+ if (icmp_err_packet(fl, family)) {
+ struct flowi fl1;
+ struct net *net = dev_net(skb->dev);
+
+ if (xfrm_icmp_flow_decode(skb, family, fl, &fl1))
+ return pol;
+
+ pol = xfrm_policy_lookup(net, &fl1, family, XFRM_POLICY_FWD, if_id);
+ }
+
+ return pol;
+}
+
+static inline struct dst_entry *
+xfrm_out_fwd_icmp(struct sk_buff *skb, struct flowi *fl,
+ unsigned short family, struct dst_entry *dst)
+{
+ if (icmp_err_packet(fl, family)) {
+ struct net *net = dev_net(skb->dev);
+ struct dst_entry *dst2;
+ struct flowi fl1;
+
+ if (xfrm_icmp_flow_decode(skb, family, fl, &fl1))
+ return dst;
+
+ dst_hold(dst);
+
+ dst2 = xfrm_lookup(net, dst, &fl1, NULL, (XFRM_LOOKUP_QUEUE | XFRM_LOOKUP_ICMP));
+
+ if (IS_ERR(dst2))
+ return dst;
+
+ if (dst2->xfrm) {
+ dst_release(dst);
+ dst = dst2;
+ } else {
+ dst_release(dst2);
+ }
+ }
+
+ return dst;
+}
+
int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
unsigned short family)
{
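
[Editor's note] Taken together, the additions above let the policy engine treat an ICMP error as standing in for the packet that triggered it: icmp_err_packet() gates on the relevant error types, and xfrm_icmp_flow_decode() clones the skb, strips the outer IP and ICMP headers, and runs a reversed session decode on the embedded packet so its flow can be matched against selectors and policies. The offset arithmetic assumes the offending header begins right after the fixed-size ICMP header; an illustrative standalone helper under the same assumption (no IPv4 options on the outer header), not part of the patch:

	#include <linux/icmp.h>
	#include <linux/ip.h>
	#include <linux/skbuff.h>

	/* Illustrative only: return the embedded (offending) IPv4 header of an
	 * ICMP error whose outer IP header starts at skb->data, mirroring the
	 * sizeof(struct iphdr) + sizeof(struct icmphdr) pull done above. */
	static struct iphdr *icmp_err_inner_hdr(struct sk_buff *skb)
	{
		unsigned int off = sizeof(struct iphdr) + sizeof(struct icmphdr);

		if (!pskb_may_pull(skb, off + sizeof(struct iphdr)))
			return NULL;
		return (struct iphdr *)(skb->data + off);
	}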
@@ -3551,9 +3674,17 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
for (i = sp->len - 1; i >= 0; i--) {
struct xfrm_state *x = sp->xvec[i];
+ int ret = 0;
+
if (!xfrm_selector_match(&x->sel, &fl, family)) {
- XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
- return 0;
+ ret = 1;
+ if (x->props.flags & XFRM_STATE_ICMP &&
+ xfrm_selector_inner_icmp_match(skb, family, &x->sel, &fl))
+ ret = 0;
+ if (ret) {
+ XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
+ return 0;
+ }
}
}
}
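
[Editor's note] Note the opt-in in the hunk above: the relaxed check only runs for states whose XFRM_STATE_ICMP flag is set; otherwise a selector mismatch still bumps XfrmInStateMismatch and drops the packet as before. The flag is uapi and is what "ip xfrm state add ... flag icmp" sets; a hypothetical netlink-client helper:

	#include <linux/xfrm.h>	/* uapi: XFRM_STATE_ICMP, struct xfrm_usersa_info */

	/* Hypothetical helper for a keying daemon: opt the SA in to
	 * inner-ICMP matching before sending it in an XFRM_MSG_NEWSA
	 * request. */
	static void sa_opt_in_icmp_errors(struct xfrm_usersa_info *sa)
	{
		sa->flags |= XFRM_STATE_ICMP;
	}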
@@ -3576,6 +3707,9 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
return 0;
}
+ if (!pol && dir == XFRM_POLICY_FWD)
+ pol = xfrm_in_fwd_icmp(skb, &fl, family, if_id);
+
if (!pol) {
if (net->xfrm.policy_default[dir] == XFRM_USERPOLICY_BLOCK) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);
@@ -3709,6 +3843,10 @@ int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
res = 0;
dst = NULL;
}
+
+ if (dst && !dst->xfrm)
+ dst = xfrm_out_fwd_icmp(skb, &fl, family, dst);
+
skb_dst_set(skb, dst);
return res;
}
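
[Editor's note] On the forwarding output path the new hook is deliberately a fallback: it fires only when the routed dst carries no xfrm bundle, re-runs xfrm_lookup() with the reversed inner flow, and keeps the result only when a transform bundle was actually attached. The keep-or-restore choice, reduced to its essence (a sketch; the real code's dst_hold()/error handling around xfrm_lookup() is simplified away):

	#include <linux/err.h>
	#include <net/dst.h>

	/* Prefer the candidate route only when the lookup attached an
	 * xfrm bundle to it; otherwise fall back to the current route. */
	static struct dst_entry *keep_if_transformed(struct dst_entry *cur,
						     struct dst_entry *candidate)
	{
		if (IS_ERR(candidate))
			return cur;		/* lookup failed: keep current route */
		if (candidate->xfrm) {
			dst_release(cur);	/* prefer the IPsec bundle */
			return candidate;
		}
		dst_release(candidate);		/* plain route: keep what we had */
		return cur;
	}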
@@ -4027,10 +4165,7 @@ static int __net_init xfrm_policy_init(struct net *net)
int dir, err;
if (net_eq(net, &init_net)) {
- xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
- sizeof(struct xfrm_dst),
- 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
- NULL);
+ xfrm_dst_cache = KMEM_CACHE(xfrm_dst, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
err = rhashtable_init(&xfrm_policy_inexact_table,
&xfrm_pol_inexact_params);
BUG_ON(err);
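
[Editor's note] The slab conversion above is purely mechanical: KMEM_CACHE() derives the cache name from the struct tag and the alignment from the type, which is why the hand-written name/size/0-alignment arguments disappear. Its definition in <linux/slab.h> is essentially:

	#define KMEM_CACHE(__struct, __flags)					\
		kmem_cache_create(#__struct, sizeof(struct __struct),		\
				  __alignof__(struct __struct), (__flags), NULL)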
diff --git a/net/xfrm/xfrm_proc.c b/net/xfrm/xfrm_proc.c
index fee9b5cf37a7..5f9bf8e5c933 100644
--- a/net/xfrm/xfrm_proc.c
+++ b/net/xfrm/xfrm_proc.c
@@ -52,6 +52,7 @@ static int xfrm_statistics_seq_show(struct seq_file *seq, void *v)
memset(buff, 0, sizeof(unsigned long) * LINUX_MIB_XFRMMAX);
+ xfrm_state_update_stats(net);
snmp_get_cpu_field_batch(buff, xfrm_mib_list,
net->mib.xfrm_statistics);
for (i = 0; xfrm_mib_list[i].name; i++)
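
[Editor's note] With this one-liner, every read of /proc/net/xfrm_stat first folds in counters held by offloading hardware (via xfrm_state_update_stats(), added below in xfrm_state.c) before the per-CPU MIB snapshot is taken. A plain C reader to observe the effect:

	#include <stdio.h>

	int main(void)
	{
		char line[256];
		FILE *f = fopen("/proc/net/xfrm_stat", "r");

		if (!f)
			return 1;
		while (fgets(line, sizeof(line), f))
			fputs(line, stdout);
		fclose(f);
		return 0;
	}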
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index bda5327bf34d..0c306473a79d 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -570,7 +570,7 @@ static enum hrtimer_restart xfrm_timer_handler(struct hrtimer *me)
int err = 0;
spin_lock(&x->lock);
- xfrm_dev_state_update_curlft(x);
+ xfrm_dev_state_update_stats(x);
if (x->km.state == XFRM_STATE_DEAD)
goto out;
@@ -1935,7 +1935,7 @@ EXPORT_SYMBOL(xfrm_state_update);
int xfrm_state_check_expire(struct xfrm_state *x)
{
- xfrm_dev_state_update_curlft(x);
+ xfrm_dev_state_update_stats(x);
if (!READ_ONCE(x->curlft.use_time))
WRITE_ONCE(x->curlft.use_time, ktime_get_real_seconds());
@@ -1957,6 +1957,19 @@ int xfrm_state_check_expire(struct xfrm_state *x)
}
EXPORT_SYMBOL(xfrm_state_check_expire);
+void xfrm_state_update_stats(struct net *net)
+{
+ struct xfrm_state *x;
+ int i;
+
+ spin_lock_bh(&net->xfrm.xfrm_state_lock);
+ for (i = 0; i <= net->xfrm.state_hmask; i++) {
+ hlist_for_each_entry(x, net->xfrm.state_bydst + i, bydst)
+ xfrm_dev_state_update_stats(x);
+ }
+ spin_unlock_bh(&net->xfrm.xfrm_state_lock);
+}
+
struct xfrm_state *
xfrm_state_lookup(struct net *net, u32 mark, const xfrm_address_t *daddr, __be32 spi,
u8 proto, unsigned short family)
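
[Editor's note] xfrm_state_update_stats() walks the whole by-destination hash under xfrm_state_lock and asks each offloaded state's driver to refresh its counters; the _update_curlft to _update_stats rename throughout this series reflects that drivers may now sync more than lifetime bytes/packets. A hedged sketch of the driver side (mydrv_* names are invented, mydrv_hw_read_sa_counters() stands in for a real firmware or register query, and the xfrmdev_ops member is assumed to follow the same rename):

	/* Hypothetical driver callback matching the renamed hook: fold the
	 * hardware's per-SA counters into the software lifetime state that
	 * xfrm_state_check_expire() evaluates. */
	static void mydrv_xdo_dev_state_update_stats(struct xfrm_state *x)
	{
		u64 packets, bytes;

		mydrv_hw_read_sa_counters(x, &packets, &bytes);	/* invented */
		x->curlft.packets = packets;
		x->curlft.bytes = bytes;
	}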
diff --git a/net/xfrm/xfrm_state_bpf.c b/net/xfrm/xfrm_state_bpf.c
index 9e20d4a377f7..2248eda741f8 100644
--- a/net/xfrm/xfrm_state_bpf.c
+++ b/net/xfrm/xfrm_state_bpf.c
@@ -117,10 +117,10 @@ __bpf_kfunc void bpf_xdp_xfrm_state_release(struct xfrm_state *x)
__bpf_kfunc_end_defs();
-BTF_SET8_START(xfrm_state_kfunc_set)
+BTF_KFUNCS_START(xfrm_state_kfunc_set)
BTF_ID_FLAGS(func, bpf_xdp_get_xfrm_state, KF_RET_NULL | KF_ACQUIRE)
BTF_ID_FLAGS(func, bpf_xdp_xfrm_state_release, KF_RELEASE)
-BTF_SET8_END(xfrm_state_kfunc_set)
+BTF_KFUNCS_END(xfrm_state_kfunc_set)
static const struct btf_kfunc_id_set xfrm_state_xdp_kfunc_set = {
.owner = THIS_MODULE,
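
[Editor's note] The BTF_SET8_* to BTF_KFUNCS_* switch above is a treewide macro rename: BTF_KFUNCS_START/END emit the same BTF ID set but additionally mark it as a kfunc set, so BTF validation can tell kfunc sets apart from other ID sets. Declaration and registration look like this (demo_* names are illustrative; the registration call itself is unchanged):

	#include <linux/btf.h>
	#include <linux/btf_ids.h>

	BTF_KFUNCS_START(demo_kfunc_set)	/* was: BTF_SET8_START */
	BTF_ID_FLAGS(func, bpf_xdp_get_xfrm_state, KF_RET_NULL | KF_ACQUIRE)
	BTF_ID_FLAGS(func, bpf_xdp_xfrm_state_release, KF_RELEASE)
	BTF_KFUNCS_END(demo_kfunc_set)		/* was: BTF_SET8_END */

	static const struct btf_kfunc_id_set demo_kfunc_id_set = {
		.owner	= THIS_MODULE,
		.set	= &demo_kfunc_set,
	};

	/* from module init:
	 * err = register_btf_kfunc_id_set(BPF_PROG_TYPE_XDP, &demo_kfunc_id_set);
	 */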
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 912c1189ba41..810b520493f3 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -902,7 +902,7 @@ static void copy_to_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
memcpy(&p->sel, &x->sel, sizeof(p->sel));
memcpy(&p->lft, &x->lft, sizeof(p->lft));
if (x->xso.dev)
- xfrm_dev_state_update_curlft(x);
+ xfrm_dev_state_update_stats(x);
memcpy(&p->curlft, &x->curlft, sizeof(p->curlft));
put_unaligned(x->stats.replay_window, &p->stats.replay_window);
put_unaligned(x->stats.replay, &p->stats.replay);